John6666 committed
Commit b30172e
1 Parent(s): 5b32612

Upload 12 files

Files changed (4)
  1. README.md +1 -1
  2. app.py +137 -29
  3. color_image.png +0 -0
  4. spiral_no_transparent.png +0 -0
README.md CHANGED
@@ -1,6 +1,6 @@
  ---
  title: 🧩 DiffuseCraft Mod
- emoji: 🧩🖼️
+ emoji: 🧩🖼️📦
  colorFrom: red
  colorTo: pink
  sdk: gradio
app.py CHANGED
@@ -1,6 +1,3 @@
- #######################
- # UTILS
- #######################
  import spaces
  import os
  from stablepy import Model_Diffusers
@@ -25,6 +22,7 @@ from stablepy import (
  SD15_TASKS,
  SDXL_TASKS,
  )
+ import urllib.parse

  preprocessor_controlnet = {
  "openpose": [
@@ -59,7 +57,7 @@ preprocessor_controlnet = {
  "lineart": [
  "Lineart",
  "Lineart coarse",
- "LineartAnime",
+ "Lineart (anime)",
  "None",
  "None (anime)",
  ],
@@ -96,7 +94,7 @@ task_stablepy = {
  'depth ControlNet': 'depth',
  'normalbae ControlNet': 'normalbae',
  'lineart ControlNet': 'lineart',
- 'lineart_anime ControlNet': 'lineart_anime',
+ # 'lineart_anime ControlNet': 'lineart_anime',
  'shuffle ControlNet': 'shuffle',
  'ip2p ControlNet': 'ip2p',
  'optical pattern ControlNet': 'pattern',
@@ -108,7 +106,7 @@ task_model_list = list(task_stablepy.keys())

  def download_things(directory, url, hf_token="", civitai_api_key=""):
  url = url.strip()
-
+
  if "drive.google.com" in url:
  original_dir = os.getcwd()
  os.chdir(directory)
@@ -116,6 +114,7 @@ def download_things(directory, url, hf_token="", civitai_api_key=""):
  os.chdir(original_dir)
  elif "huggingface.co" in url:
  url = url.replace("?download=true", "")
+ # url = urllib.parse.quote(url, safe=':/') # fix encoding
  if "/blob/" in url:
  url = url.replace("/blob/", "/resolve/")
  user_header = f'"Authorization: Bearer {hf_token}"'
@@ -168,6 +167,7 @@ os.makedirs(directory_loras, exist_ok=True)
  directory_vaes = 'vaes'
  os.makedirs(directory_vaes, exist_ok=True)

+
  ## BEGIN MOD
  # - **Download SD 1.5 Models**
  #download_model = "https://huggingface.co/frankjoshua/toonyou_beta6/resolve/main/toonyou_beta6.safetensors"
@@ -215,7 +215,45 @@ def get_model_id_list():

  return model_ids

- load_diffusers_format_model = get_model_id_list()
+ load_diffusers_format_model = [
+ 'stabilityai/stable-diffusion-xl-base-1.0',
+ 'cagliostrolab/animagine-xl-3.1',
+ 'misri/epicrealismXL_v7FinalDestination',
+ 'misri/juggernautXL_juggernautX',
+ 'misri/zavychromaxl_v80',
+ 'SG161222/RealVisXL_V4.0',
+ 'misri/newrealityxlAllInOne_Newreality40',
+ 'eienmojiki/Anything-XL',
+ 'eienmojiki/Starry-XL-v5.2',
+ 'gsdf/CounterfeitXL',
+ 'kitty7779/ponyDiffusionV6XL',
+ 'John6666/ebara-mfcg-pony-mix-v12-sdxl',
+ 'John6666/t-ponynai3-v51-sdxl',
+ 'yodayo-ai/kivotos-xl-2.0',
+ 'yodayo-ai/holodayo-xl-2.1',
+ 'digiplay/majicMIX_sombre_v2',
+ 'digiplay/majicMIX_realistic_v6',
+ 'digiplay/majicMIX_realistic_v7',
+ 'digiplay/DreamShaper_8',
+ 'digiplay/BeautifulArt_v1',
+ 'digiplay/DarkSushi2.5D_v1',
+ 'digiplay/darkphoenix3D_v1.1',
+ 'digiplay/BeenYouLiteL11_diffusers',
+ 'rubbrband/revAnimated_v2Rebirth',
+ 'youknownothing/cyberrealistic_v50',
+ 'votepurchase/counterfeitV30_v30',
+ 'Meina/MeinaMix_V11',
+ 'Meina/MeinaUnreal_V5',
+ 'Meina/MeinaPastel_V7',
+ 'rubbrband/realcartoon3d_v16',
+ 'rubbrband/realcartoonRealistic_v14',
+ 'KBlueLeaf/Kohaku-XL-Epsilon-rev3',
+ ]
+
+ def list_uniq(l):
+ return sorted(set(l), key=l.index)
+
+ load_diffusers_format_model = list_uniq(get_model_id_list() + load_diffusers_format_model)
  ## END MOD

  CIVITAI_API_KEY = os.environ.get("CIVITAI_API_KEY")
@@ -254,6 +292,25 @@ lora_model_list.insert(0, "None")
  vae_model_list = get_model_list(directory_vaes)
  vae_model_list.insert(0, "None")

+ def get_my_lora(link_url):
+ for url in [url.strip() for url in link_url.split(',')]:
+ if not os.path.exists(f"./loras/{url.split('/')[-1]}"):
+ download_things(directory_loras, url, hf_token, CIVITAI_API_KEY)
+ new_lora_model_list = get_model_list(directory_loras)
+ new_lora_model_list.insert(0, "None")
+
+ return gr.update(
+ choices=new_lora_model_list
+ ), gr.update(
+ choices=new_lora_model_list
+ ), gr.update(
+ choices=new_lora_model_list
+ ), gr.update(
+ choices=new_lora_model_list
+ ), gr.update(
+ choices=new_lora_model_list
+ ),
+
  print('\033[33m🏁 Download and listing of valid models completed.\033[0m')

  upscaler_dict_gui = {
@@ -337,6 +394,7 @@ warnings.filterwarnings(action="ignore", category=FutureWarning, module="transfo
  from stablepy import logger
  logger.setLevel(logging.DEBUG)

+
  ## BEGIN MOD
  from v2 import (
  V2UI,
@@ -371,6 +429,17 @@ def description_ui():
  ## END MOD


+ def info_html(json_data, title, subtitle):
+ return f"""
+ <div style='padding: 0; border-radius: 10px;'>
+ <p style='margin: 0; font-weight: bold;'>{title}</p>
+ <details>
+ <summary>Details</summary>
+ <p style='margin: 0; font-weight: bold;'>{subtitle}</p>
+ </details>
+ </div>
+ """
+
  class GuiSD:
  def __init__(self, stream=True):
  self.model = None
@@ -384,18 +453,12 @@ class GuiSD:
  retain_task_model_in_cache=False,
  )

- @spaces.GPU(duration=120)
- def infer(self, model, pipe_params):
- images, image_list = model(**pipe_params)
- return images
-
  def load_new_model(self, model_name, vae_model, task, progress=gr.Progress(track_tqdm=True)):

  yield f"Loading model: {model_name}"

  vae_model = vae_model if vae_model != "None" else None

-
  if model_name in model_list:
  model_is_xl = "xl" in model_name.lower()
  sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
@@ -405,6 +468,7 @@
  if incompatible_vae:
  vae_model = None

+
  self.model.load_pipe(
  model_name,
  task_name=task_stablepy[task],
@@ -412,7 +476,7 @@
  type_model_precision=torch.float16,
  retain_task_model_in_cache=False,
  )
- yield f"Model loaded: {model_name} {vae_model if vae_model else ''}"
+ yield f"Model loaded: {model_name}"

  @spaces.GPU
  def generate_pipeline(
@@ -518,15 +582,14 @@
  model_ip2,
  mode_ip2,
  scale_ip2,
- # progress=gr.Progress(track_tqdm=True),
- # progress=gr.Progress()
  ):
-
- # progress(0.01, desc="Loading model...")

  vae_model = vae_model if vae_model != "None" else None
  loras_list = [lora1, lora2, lora3, lora4, lora5]
+ vae_msg = f"VAE: {vae_model}" if vae_model else ""
+ msg_lora = []

+
  if model_name in model_list:
  model_is_xl = "xl" in model_name.lower()
  sdxl_in_vae = vae_model and "sdxl" in vae_model.lower()
@@ -534,19 +597,23 @@
  incompatible_vae = (model_is_xl and vae_model and not sdxl_in_vae) or (not model_is_xl and sdxl_in_vae)

  if incompatible_vae:
- gr.Info(
+ msg_inc_vae = (
  f"The selected VAE is for a { 'SD 1.5' if model_is_xl else 'SDXL' } model, but you"
  f" are using a { model_type } model. The default VAE "
  "will be used."
  )
+ gr.Info(msg_inc_vae)
+ vae_msg = msg_inc_vae
  vae_model = None

  for la in loras_list:
- if la is not None and la != "None":
+ if la is not None and la != "None" and la in lora_model_list:
  print(la)
  lora_type = ("animetarot" in la.lower() or "Hyper-SD15-8steps".lower() in la.lower())
  if (model_is_xl and lora_type) or (not model_is_xl and not lora_type):
- gr.Info(f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }.")
+ msg_inc_lora = f"The LoRA {la} is for { 'SD 1.5' if model_is_xl else 'SDXL' }, but you are using { model_type }."
+ gr.Info(msg_inc_lora)
+ msg_lora.append(msg_inc_lora)

  task = task_stablepy[task]

@@ -731,22 +798,27 @@
  pipe_params["num_images"] = num_images
  gr.Info("Num images x 2 🎉")

- # print("Inference 1")
- # yield self.infer_short(self.model, pipe_params)
+ # Maybe fix lora issue: 'Cannot copy out of meta tensor; no data!''
+ self.model.pipe.to("cuda:0" if torch.cuda.is_available() else "cpu")
+
+ info_state = f"PROCESSING "
  for img, seed, data in self.model(**pipe_params):
- info_state = f"PROCESSING..."
+ info_state += ">"
  if data:
- info_state = f"COMPLETE: seeds={str(seed)}"
+ info_state = f"COMPLETED. Seeds: {str(seed)}"
+ if vae_msg:
+ info_state = info_state + "<br>" + vae_msg
+ if msg_lora:
+ info_state = info_state + "<br>" + "<br>".join(msg_lora)
  yield img, info_state


  sd_gen = GuiSD()

  CSS ="""
- #main { width: 100%; }
  .contain { display: flex; flex-direction: column; }
  #component-0 { height: 100%; }
- #gallery { flex-grow: 2; }
+ #gallery { flex-grow: 1; }
  """
  sdxl_task = [k for k, v in task_stablepy.items() if v in SDXL_TASKS ]
  sd_task = [k for k, v in task_stablepy.items() if v in SD15_TASKS ]
@@ -780,7 +852,7 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  with gr.Accordion("Model and Task", open=False):
  task_gui = gr.Dropdown(label="Task", choices=sdxl_task, value=task_model_list[0])
  model_name_gui = gr.Dropdown(label="Model", choices=model_list, value="votepurchase/animagine-xl-3.1", allow_custom_value=True)
- optimization_gui = gr.Radio(label="Optimization for SDXL", choices=["None", "Default", "SPO", "DPO", "DPO Turbo", "PCM 16step", "PCM 8step"], value="None", interactive=True)
+ optimization_gui = gr.Radio(label="Optimization for SDXL", choices=["None", "Default", "SPO", "DPO", "DPO Turbo", "Hyper-SDXL", "PCM 16step", "PCM 8step"], value="None", interactive=True)
  with gr.Accordion("Generate prompt from Image", open=False):
  input_image_gui = gr.Image(label="Input image", type="pil", sources=["upload", "clipboard"], height=256)
  with gr.Accordion(label="Advanced options", open=False):
@@ -936,7 +1008,9 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  ## END MOD

  with gr.Accordion("Hires fix", open=False, visible=True):
+
  upscaler_keys = list(upscaler_dict_gui.keys())
+
  upscaler_model_path_gui = gr.Dropdown(label="Upscaler", choices=upscaler_keys, value=upscaler_keys[0])
  upscaler_increases_size_gui = gr.Slider(minimum=1.1, maximum=6., step=0.1, value=1.4, label="Upscale by")
  esrgan_tile_gui = gr.Slider(minimum=0, value=100, maximum=500, step=1, label="ESRGAN Tile")
@@ -946,6 +1020,7 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  hires_sampler_gui = gr.Dropdown(label="Hires Sampler", choices=["Use same sampler"] + scheduler_names[:-1], value="Use same sampler")
  hires_prompt_gui = gr.Textbox(label="Hires Prompt", placeholder="Main prompt will be use", lines=3)
  hires_negative_prompt_gui = gr.Textbox(label="Hires Negative Prompt", placeholder="Main negative prompt will be use", lines=3)
+
  with gr.Accordion("LoRA", open=False, visible=True):
  lora1_gui = gr.Dropdown(label="Lora1", choices=lora_model_list)
  lora_scale_1_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 1")
@@ -957,9 +1032,21 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  lora_scale_4_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 4")
  lora5_gui = gr.Dropdown(label="Lora5", choices=lora_model_list)
  lora_scale_5_gui = gr.Slider(minimum=-2, maximum=2, step=0.01, value=0.33, label="Lora Scale 5")
+
+ with gr.Accordion("From URL", open=False, visible=True):
+ text_lora = gr.Textbox(label="URL", placeholder="http://...my_lora_url.safetensors", lines=1)
+ button_lora = gr.Button("Get and update lists of LoRAs")
+ button_lora.click(
+ get_my_lora,
+ [text_lora],
+ [lora1_gui, lora2_gui, lora3_gui, lora4_gui, lora5_gui]
+ )
+
  with gr.Accordion("IP-Adapter", open=False, visible=True):##############
+
  IP_MODELS = sorted(list(set(IP_ADAPTERS_SD + IP_ADAPTERS_SDXL)))
  MODE_IP_OPTIONS = ["original", "style", "layout", "style+layout"]
+
  with gr.Accordion("IP-Adapter 1", open=False, visible=True):
  image_ip1 = gr.Image(label="IP Image", type="filepath")
  mask_ip1 = gr.Image(label="IP Mask", type="filepath")
@@ -972,6 +1059,7 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  model_ip2 = gr.Dropdown(value="base", label="Model", choices=IP_MODELS)
  mode_ip2 = gr.Dropdown(value="style", label="Mode", choices=MODE_IP_OPTIONS)
  scale_ip2 = gr.Slider(minimum=0., maximum=2., step=0.01, value=0.7, label="Scale")
+
  with gr.Accordion("ControlNet / Img2img / Inpaint", open=False, visible=True):
  image_control = gr.Image(label="Image ControlNet/Inpaint/Img2img", type="filepath")
  image_mask_gui = gr.Image(label="Image Mask", type="filepath")
@@ -981,6 +1069,7 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  )
  image_resolution_gui = gr.Slider(minimum=64, maximum=2048, step=64, value=1024, label="Image Resolution")
  preprocessor_name_gui = gr.Dropdown(label="Preprocessor Name", choices=preprocessor_controlnet["canny"])
+
  def change_preprocessor_choices(task):
  task = task_stablepy[task]
  if task in preprocessor_controlnet.keys():
@@ -988,6 +1077,7 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  else:
  choices_task = preprocessor_controlnet["canny"]
  return gr.update(choices=choices_task, value=choices_task[0])
+
  task_gui.change(
  change_preprocessor_choices,
  [task_gui],
@@ -1001,15 +1091,19 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  control_net_output_scaling_gui = gr.Slider(minimum=0, maximum=5.0, step=0.1, value=1, label="ControlNet Output Scaling in UNet")
  control_net_start_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=0, label="ControlNet Start Threshold (%)")
  control_net_stop_threshold_gui = gr.Slider(minimum=0, maximum=1, step=0.01, value=1, label="ControlNet Stop Threshold (%)")
+
  with gr.Accordion("T2I adapter", open=False, visible=True):
  t2i_adapter_preprocessor_gui = gr.Checkbox(value=True, label="T2i Adapter Preprocessor")
  adapter_conditioning_scale_gui = gr.Slider(minimum=0, maximum=5., step=0.1, value=1, label="Adapter Conditioning Scale")
  adapter_conditioning_factor_gui = gr.Slider(minimum=0, maximum=1., step=0.01, value=0.55, label="Adapter Conditioning Factor (%)")
+
  with gr.Accordion("Styles", open=False, visible=True):
+
  try:
  style_names_found = sd_gen.model.STYLE_NAMES
  except:
  style_names_found = STYLE_NAMES
+
  style_prompt_gui = gr.Dropdown(
  style_names_found,
  multiselect=True,
@@ -1019,24 +1113,33 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  )
  style_json_gui = gr.File(label="Style JSON File")
  style_button = gr.Button("Load styles")
+
  def load_json_style_file(json):
  if not sd_gen.model:
  gr.Info("First load the model")
  return gr.update(value=None, choices=STYLE_NAMES)
+
  sd_gen.model.load_style_file(json)
  gr.Info(f"{len(sd_gen.model.STYLE_NAMES)} styles loaded")
  return gr.update(value=None, choices=sd_gen.model.STYLE_NAMES)
+
  style_button.click(load_json_style_file, [style_json_gui], [style_prompt_gui])
+
  with gr.Accordion("Textual inversion", open=False, visible=False):
  active_textual_inversion_gui = gr.Checkbox(value=False, label="Active Textual Inversion in prompt")
+
  with gr.Accordion("Detailfix", open=False, visible=True):
+
  # Adetailer Inpaint Only
  adetailer_inpaint_only_gui = gr.Checkbox(label="Inpaint only", value=True)
+
  # Adetailer Verbose
  adetailer_verbose_gui = gr.Checkbox(label="Verbose", value=False)
+
  # Adetailer Sampler
  adetailer_sampler_options = ["Use same sampler"] + scheduler_names[:-1]
  adetailer_sampler_gui = gr.Dropdown(label="Adetailer sampler:", choices=adetailer_sampler_options, value="Use same sampler")
+
  with gr.Accordion("Detailfix A", open=False, visible=True):
  # Adetailer A
  adetailer_active_a_gui = gr.Checkbox(label="Enable Adetailer A", value=False)
@@ -1049,6 +1152,7 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  mask_dilation_a_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
  mask_blur_a_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
  mask_padding_a_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
+
  with gr.Accordion("Detailfix B", open=False, visible=True):
  # Adetailer B
  adetailer_active_b_gui = gr.Checkbox(label="Enable Adetailer B", value=False)
@@ -1061,11 +1165,13 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  mask_dilation_b_gui = gr.Number(label="Mask dilation:", value=4, minimum=1)
  mask_blur_b_gui = gr.Number(label="Mask blur:", value=4, minimum=1)
  mask_padding_b_gui = gr.Number(label="Mask padding:", value=32, minimum=1)
+
  with gr.Accordion("Other settings", open=False, visible=True):
  image_previews_gui = gr.Checkbox(value=True, label="Image Previews")
  hires_before_adetailer_gui = gr.Checkbox(value=False, label="Hires Before Adetailer")
  hires_after_adetailer_gui = gr.Checkbox(value=True, label="Hires After Adetailer")
  generator_in_cpu_gui = gr.Checkbox(value=False, label="Generator in CPU")
+
  with gr.Accordion("More settings", open=False, visible=False):
  loop_generation_gui = gr.Slider(minimum=1, value=1, label="Loop Generation")
  retain_task_cache_gui = gr.Checkbox(value=False, label="Retain task model in cache")
@@ -1080,7 +1186,7 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  xformers_memory_efficient_attention_gui = gr.Checkbox(value=False, label="Xformers Memory Efficient Attention")

  ## BEGIN MOD
- with gr.Accordion("Examples", open=True, visible=True):
+ with gr.Accordion("Examples and help", open=True, visible=True):
  gr.Examples(
  examples=[
  [
@@ -1337,6 +1443,8 @@ with gr.Blocks(theme="NoCrypt/miku", elem_id="main", css=CSS) as app:
  return 28, 7., 'Euler a', True, 'loras/sdxl-DPO-LoRA.safetensors', 1.
  elif opt == 'DPO Turbo':
  return 8, 2.5, 'LCM', True, 'loras/sd_xl_dpo_turbo_lora_v1-128dim.safetensors', 1.
+ elif opt == 'Hyper-SDXL':
+ return 12, 6., 'Euler a', True, 'loras/Hyper-SDXL-12steps-CFG-lora.safetensors', 1.
  elif opt == 'Default':
  return 28, 7., 'Euler a', False, None, 1.
  else: # None
color_image.png ADDED
spiral_no_transparent.png ADDED
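
A note on the list_uniq helper this commit adds to app.py: it merges locally downloaded model IDs with the new hard-coded defaults while dropping duplicates and keeping first-occurrence order. A minimal standalone sketch is below; the function body is copied from the diff, while 'my-org/my-finetune' and the input lists are hypothetical examples, not values used by the Space.

# Sketch of the order-preserving de-duplication used for load_diffusers_format_model.
def list_uniq(l):
    # set() removes duplicates; sorting by the index of each item's first
    # occurrence in the original list restores the original ordering.
    return sorted(set(l), key=l.index)

# Hypothetical inputs for illustration only.
local_ids = ['cagliostrolab/animagine-xl-3.1', 'my-org/my-finetune']
defaults = ['stabilityai/stable-diffusion-xl-base-1.0', 'cagliostrolab/animagine-xl-3.1']
print(list_uniq(local_ids + defaults))
# ['cagliostrolab/animagine-xl-3.1', 'my-org/my-finetune', 'stabilityai/stable-diffusion-xl-base-1.0']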