John6666 committed on
Commit: d0d2198
1 Parent(s): 4b234e2

Upload 3 files

Files changed (3):
  1. app.py +65 -39
  2. mod.py +8 -3
  3. requirements.txt +1 -1
app.py CHANGED
@@ -24,6 +24,7 @@ from tagger.fl2flux import predict_tags_fl2_flux
 # Initialize the base model
 base_model = models[0]
 controlnet_model_union_repo = 'InstantX/FLUX.1-dev-Controlnet-Union'
+#controlnet_model_union_repo = 'InstantX/FLUX.1-dev-Controlnet-Union-alpha'
 pipe = DiffusionPipeline.from_pretrained(base_model, torch_dtype=torch.bfloat16)
 controlnet_union = None
 controlnet = None
@@ -39,32 +40,32 @@ def change_base_model(repo_id: str, cn_on: bool, progress=gr.Progress(track_tqdm
     global last_model
     global last_cn_on
     dtype = torch.bfloat16
+    #dtype = torch.float8_e4m3fn
     try:
-        if (repo_id == last_model and cn_on is last_cn_on) or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return
+        if (repo_id == last_model and cn_on is last_cn_on) or not is_repo_name(repo_id) or not is_repo_exists(repo_id): return gr.update(visible=True)
         if cn_on:
-            progress(0, desc=f"Loading model: {repo_id} / Loading ControlNet: {controlnet_model_union_repo}")
+            #progress(0, desc=f"Loading model: {repo_id} / Loading ControlNet: {controlnet_model_union_repo}")
             print(f"Loading model: {repo_id} / Loading ControlNet: {controlnet_model_union_repo}")
             clear_cache()
             controlnet_union = FluxControlNetModel.from_pretrained(controlnet_model_union_repo, torch_dtype=dtype)
             controlnet = FluxMultiControlNetModel([controlnet_union])
             pipe = FluxControlNetPipeline.from_pretrained(repo_id, controlnet=controlnet, torch_dtype=dtype)
-            #pipe.enable_model_cpu_offload()
             last_model = repo_id
             last_cn_on = cn_on
-            progress(1, desc=f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
+            #progress(1, desc=f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
             print(f"Model loaded: {repo_id} / ControlNet Loaded: {controlnet_model_union_repo}")
         else:
-            progress(0, desc=f"Loading model: {repo_id}")
+            #progress(0, desc=f"Loading model: {repo_id}")
             print(f"Loading model: {repo_id}")
             clear_cache()
             pipe = DiffusionPipeline.from_pretrained(repo_id, torch_dtype=dtype)
-            #pipe.enable_model_cpu_offload()
             last_model = repo_id
             last_cn_on = cn_on
-            progress(1, desc=f"Model loaded: {repo_id}")
+            #progress(1, desc=f"Model loaded: {repo_id}")
             print(f"Model loaded: {repo_id}")
     except Exception as e:
-        print(e)
+        print(f"Model load Error: {e}")
+        raise gr.Error(f"Model load Error: {e}")
     return gr.update(visible=True)
 
 change_base_model.zerogpu = True
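
Note: the guard at the top of change_base_model now returns gr.update(visible=True) instead of a bare return, so the chained .success() handler wired up later still fires when the requested model is already loaded, and load failures are re-raised as gr.Error instead of being swallowed by print(e). A minimal sketch of that reload-skip pattern (change_model and its stand-in body are illustrative, not from this repo):

    import gradio as gr

    last_model: str | None = None
    last_cn_on: bool | None = None

    def change_model(repo_id: str, cn_on: bool):
        # Reload only when the selection actually changed. Returning
        # gr.update(...) instead of None keeps chained .success()
        # handlers firing even when the reload is skipped.
        global last_model, last_cn_on
        if repo_id == last_model and cn_on == last_cn_on:
            return gr.update(visible=True)
        print(f"Loading model: {repo_id}")  # stand-in for the real from_pretrained() call
        last_model, last_cn_on = repo_id, cn_on
        return gr.update(visible=True)
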
@@ -113,15 +114,16 @@ def update_selection(evt: gr.SelectData, width, height):
 
 @spaces.GPU(duration=70)
 def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress=gr.Progress(track_tqdm=True)):
-    pipe.to("cuda")
-    #controlnet.to("cuda")
-    #controlnet_union.to("cuda")
-    generator = torch.Generator(device="cuda").manual_seed(seed)
-
-    with calculateDuration("Generating image"):
-        # Generate image
-        modes, images, scales = get_control_params()
-        try:
+    global pipe
+    global controlnet
+    global controlnet_union
+    try:
+        pipe.to("cuda")
+        generator = torch.Generator(device="cuda").manual_seed(seed)
+
+        with calculateDuration("Generating image"):
+            # Generate image
+            modes, images, scales = get_control_params()
             if not cn_on or len(modes) == 0:
                 progress(0, desc="Start Inference.")
                 image = pipe(
@@ -135,6 +137,8 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
                 ).images[0]
             else:
                 progress(0, desc="Start Inference with ControlNet.")
+                if controlnet is not None: controlnet.to("cuda")
+                if controlnet_union is not None: controlnet_union.to("cuda")
                 image = pipe(
                     prompt=prompt_mash,
                     control_image=images,
@@ -147,14 +151,14 @@ def generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scal
                     generator=generator,
                     joint_attention_kwargs={"scale": lora_scale},
                 ).images[0]
-        except Exception as e:
-            progress(1, desc=f"Inference Error: {e}")
-            print(e)
-            raise Exception(f"Inference Error: {e}")
+    except Exception as e:
+        print(e)
+        raise gr.Error(f"Inference Error: {e}")
     return image
 
 def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
              lora_scale, lora_json, cn_on, progress=gr.Progress(track_tqdm=True)):
+    global pipe
     if selected_index is None and not is_valid_lora(lora_json):
         gr.Info("LoRA isn't selected.")
         # raise gr.Error("You must select a LoRA before proceeding.")
@@ -192,7 +196,7 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
     if randomize_seed:
         seed = random.randint(0, MAX_SEED)
 
-    progress(1, desc="Preparing Inference.")
+    progress(0, desc="Running Inference.")
 
     image = generate_image(prompt_mash, steps, seed, cfg_scale, width, height, lora_scale, cn_on, progress)
     if is_valid_lora(lora_json):
@@ -200,6 +204,8 @@ def run_lora(prompt, cfg_scale, steps, selected_index, randomize_seed, seed, wid
         pipe.unload_lora_weights()
     if selected_index is not None: pipe.unload_lora_weights()
     pipe.to("cpu")
+    if controlnet is not None: controlnet.to("cpu")
+    if controlnet_union is not None: controlnet_union.to("cpu")
     clear_cache()
     return image, seed
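
Note: generate_image and run_lora now share one device dance for ZeroGPU: declare the pipeline objects global, move them to CUDA only for the duration of inference, move them back to CPU afterwards, and re-raise failures as gr.Error so they surface in the UI. A condensed sketch of the pattern, assuming a diffusers-style pipe with .to() and .images; the commit itself does the CPU move in run_lora rather than in a finally block:

    import gradio as gr
    import torch

    def run_once(pipe, prompt: str, seed: int = 0):
        try:
            # Claim the GPU only for the duration of the call.
            pipe.to("cuda")
            generator = torch.Generator(device="cuda").manual_seed(seed)
            image = pipe(prompt=prompt, generator=generator).images[0]
        except Exception as e:
            # Surface failures in the UI rather than dying silently.
            raise gr.Error(f"Inference Error: {e}")
        finally:
            # Hand the GPU back so the Space frees VRAM between requests.
            pipe.to("cpu")
            torch.cuda.empty_cache()
        return image
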
@@ -390,6 +396,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
         cn_mode = [None] * num_cns
         cn_scale = [None] * num_cns
         cn_image = [None] * num_cns
+        cn_image_ref = [None] * num_cns
         cn_res = [None] * num_cns
         cn_num = [None] * num_cns
         for i in range(num_cns):
@@ -399,42 +406,56 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
             cn_scale[i] = gr.Slider(label=f"ControlNet {int(i+1)} Weight", minimum=0.0, maximum=1.0, step=0.01, value=0.75)
             cn_res[i] = gr.Slider(label=f"ControlNet {int(i+1)} Preprocess resolution", minimum=128, maximum=512, value=384, step=1)
             cn_num[i] = gr.Number(i, visible=False)
-            cn_image[i] = gr.Image(type="pil", label="Control Image", height=256, show_share_button=False)
+            with gr.Row():
+                cn_image_ref[i] = gr.Image(label="Image Reference", type="pil", format="png", height=256, sources=["upload", "clipboard"], show_fullscreen_button=False, show_share_button=False)
+                cn_image[i] = gr.Image(label="Control Image", type="pil", format="png", height=256, show_share_button=False, show_fullscreen_button=False, interactive=False)
 
         gallery.select(
             update_selection,
             inputs=[width, height],
-            outputs=[prompt, selected_info, selected_index, width, height]
+            outputs=[prompt, selected_info, selected_index, width, height],
+            queue=False,
+            show_api=False,
         )
         custom_lora.input(
             add_custom_lora,
             inputs=[custom_lora],
-            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt]
+            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, prompt],
+            queue=False,
+            show_api=False,
         )
         custom_lora_button.click(
             remove_custom_lora,
-            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora]
+            outputs=[custom_lora_info, custom_lora_button, gallery, selected_info, selected_index, custom_lora],
+            queue=False,
+            show_api=False,
         )
         gr.on(
             triggers=[generate_button.click, prompt.submit],
             fn=change_base_model,
             inputs=[model_name, cn_on],
-            outputs=[result]
+            outputs=[result],
+            queue=False,
+            show_api=False,
         ).success(
             fn=run_lora,
             inputs=[prompt, cfg_scale, steps, selected_index, randomize_seed, seed, width, height,
                     lora_scale, lora_repo_json, cn_on],
-            outputs=[result, seed]
+            outputs=[result, seed],
+            queue=True,
+            show_api=True,
         )
 
-        deselect_lora_button.click(deselect_lora, None, [prompt, selected_info, selected_index, width, height])
+        deselect_lora_button.click(deselect_lora, None, [prompt, selected_info, selected_index, width, height], queue=False, show_api=False)
         gr.on(
             triggers=[model_name.change, cn_on.change],
             fn=change_base_model,
             inputs=[model_name, cn_on],
-            outputs=[result]
+            outputs=[result],
+            queue=True,
+            show_api=False,
         )
-        prompt_enhance.click(enhance_prompt, [prompt], [prompt])
+        prompt_enhance.click(enhance_prompt, [prompt], [prompt], queue=False, show_api=False)
 
         gr.on(
             triggers=[lora_search_civitai_submit.click, lora_search_civitai_query.submit],
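
Note: the wiring changes in this hunk are mostly mechanical: every listener now passes queue and show_api explicitly, and the generate path chains model loading into inference with .success(), so run_lora only runs when change_base_model completed without raising. A self-contained sketch of the idiom (component names and lambdas are illustrative):

    import gradio as gr

    with gr.Blocks() as demo:
        model_name = gr.Textbox(label="Model")
        generate_button = gr.Button("Generate")
        result = gr.Image(label="Result")

        # Fast UI prep runs unqueued and hidden from the API; the
        # expensive step runs only if the first succeeded, queued and exposed.
        generate_button.click(
            lambda name: gr.update(visible=True), [model_name], [result],
            queue=False, show_api=False,
        ).success(
            lambda name: None, [model_name], [result],
            queue=True, show_api=True,
        )
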
@@ -449,7 +470,7 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
         lora_search_civitai_result.change(select_civitai_lora, [lora_search_civitai_result], [lora_download_url, lora_search_civitai_desc], scroll_to_output=True, queue=False, show_api=False)
 
         for i, l in enumerate(lora_repo):
-            deselect_lora_button.click(lambda: ("", 1.0), None, [lora_repo[i], lora_wt[i]])
+            deselect_lora_button.click(lambda: ("", 1.0), None, [lora_repo[i], lora_wt[i]], queue=False, show_api=False)
             gr.on(
                 triggers=[lora_download[i].click],
                 fn=download_my_lora,
@@ -479,8 +500,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
                 outputs=[cn_on],
                 queue=True,
                 show_api=False,
-            )
-            cn_image[i].upload(set_control_union_image, [cn_num[i], cn_mode[i], cn_image[i], height, width, cn_res[i]], [cn_image[i]])
+            ).success(set_control_union_image, [cn_num[i], cn_mode[i], cn_image_ref[i], height, width, cn_res[i]], [cn_image[i]], queue=False, show_api=False)
+            cn_image_ref[i].upload(set_control_union_image, [cn_num[i], cn_mode[i], cn_image_ref[i], height, width, cn_res[i]], [cn_image[i]], queue=False, show_api=False)
 
         tagger_generate_from_image.click(
             lambda: ("", "", ""), None, [v2_series, v2_character, prompt], queue=False, show_api=False,
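
Note: each ControlNet slot is now a pair of components: an editable Image Reference upload and a read-only Control Image preview, regenerated both when the mode changes (via the .success() chain above) and when a new reference is uploaded. A minimal sketch of the upload-to-preprocess wiring, with a stand-in preprocessor:

    import gradio as gr
    from PIL import Image, ImageOps

    def to_control(image: Image.Image | None) -> Image.Image | None:
        # Stand-in for the real preprocessors (canny, depth, pose, ...).
        return None if image is None else ImageOps.grayscale(image)

    with gr.Blocks() as demo:
        with gr.Row():
            ref = gr.Image(label="Image Reference", type="pil", sources=["upload", "clipboard"])
            ctl = gr.Image(label="Control Image", type="pil", interactive=False)
        # Regenerate the read-only preview whenever a new reference lands.
        ref.upload(to_control, [ref], [ctl], queue=False, show_api=False)
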
@@ -582,7 +603,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
         pg_create_caption_button.click(
             create_caption,
             inputs=[pg_input_image],
-            outputs=[pg_caption_output]
+            outputs=[pg_caption_output],
+            show_api=False,
         )
 
         pg_generate_button.click(
@@ -592,20 +614,23 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
                     pg_additional_details, pg_photography_styles, pg_device, pg_photographer,
                     pg_artist, pg_digital_artform,
                     pg_place, pg_lighting, pg_clothing, pg_composition, pg_pose, pg_background],
-            outputs=[pg_output, gr.Number(visible=False), pg_t5xxl_output, pg_clip_l_output, pg_clip_g_output]
+            outputs=[pg_output, gr.Number(visible=False), pg_t5xxl_output, pg_clip_l_output, pg_clip_g_output],
+            show_api=False,
         )
 
         pg_add_caption_button.click(
             prompt_generator.add_caption_to_prompt,
             inputs=[pg_output, pg_caption_output],
-            outputs=[pg_output]
+            outputs=[pg_output],
+            show_api=False,
         )
 
         pg_generate_text_button.click(
             huggingface_node.generate,
             inputs=[pg_model, pg_output, pg_happy_talk, pg_compress, pg_compression_level,
                     pg_poster, pg_custom_base_prompt],
-            outputs=pg_text_output
+            outputs=pg_text_output,
+            show_api=False,
         )
 
         def update_all_options(choice):
@@ -644,7 +669,8 @@ with gr.Blocks(theme='Nymbo/Nymbo_Theme', fill_width=True, css=css) as app:
                 pg_roles, pg_hairstyles, pg_clothing,
                 pg_place, pg_lighting, pg_composition, pg_pose, pg_background, pg_additional_details,
                 pg_photography_styles, pg_device, pg_photographer, pg_artist, pg_digital_artform
-            ]
+            ],
+            show_api=False,
         )
 
     app.queue()
 
mod.py CHANGED
@@ -169,6 +169,7 @@ def preprocess_image(image: Image.Image, control_mode: str, height: int, width:
     image_resolution = max(width, height)
     image_before = resize_image(expand2square(image.convert("RGB")), image_resolution, image_resolution, False)
     # generated control_
+    progress(0, desc="start to generate control image")
     print("start to generate control image")
     preprocessor = Preprocessor()
     if control_mode == "depth_midas":
@@ -219,7 +220,9 @@ def preprocess_image(image: Image.Image, control_mode: str, height: int, width:
     image_width, image_height = control_image.size
 
     image_after = resize_image(control_image, width, height, False)
-    print(f"generate control image success: {image_width}x{image_height} => {width}x{height}")
+    ref_width, ref_height = image.size
+    progress(1, desc=f"generate control image success: {ref_width}x{ref_height} => {image_width}x{image_height}")
+    print(f"generate control image success: {ref_width}x{ref_height} => {image_width}x{image_height}")
     return image_after
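
Note: preprocess_image now reports through gr.Progress as well as print, logging the reference size against the produced control-image size. A minimal sketch of giving a helper a progress parameter with a gr.Progress(track_tqdm=True) default (make_control_image and its resize body are illustrative):

    import gradio as gr
    from PIL import Image

    def make_control_image(image: Image.Image, width: int, height: int,
                           progress=gr.Progress(track_tqdm=True)):
        # The gr.Progress default lets a plain helper report into the
        # active progress bar when called from a Gradio event handler.
        progress(0, desc="start to generate control image")
        control_image = image.resize((width, height))  # stand-in for the real preprocessors
        ref_width, ref_height = image.size
        progress(1, desc=f"generate control image success: {ref_width}x{ref_height} => {width}x{height}")
        return control_image
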
@@ -236,8 +239,9 @@ def set_control_union_mode(i: int, mode: str, scale: str):
     else: return gr.update(visible=True)
 
 
-def set_control_union_image(i: int, mode: str, image: Image.Image, height: int, width: int, preprocess_resolution: int):
+def set_control_union_image(i: int, mode: str, image: Image.Image | None, height: int, width: int, preprocess_resolution: int):
     global control_images
+    if image is None: return None
     control_images[i] = preprocess_image(image, mode, height, width, preprocess_resolution)
     return control_images[i]
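
Note: set_control_union_image now accepts Image.Image | None and returns early, so clearing the reference image (or the mode-change chain firing before any upload) no longer feeds None into the preprocessor. The guard in isolation:

    from PIL import Image

    control_images: list[Image.Image | None] = [None] * 2

    def set_control_image(i: int, image: Image.Image | None) -> Image.Image | None:
        # Gradio hands the handler None when the image component is empty
        # or has just been cleared; bail out instead of passing None on.
        if image is None:
            return None
        control_images[i] = image  # the real code runs preprocess_image() here
        return control_images[i]
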
@@ -267,8 +271,9 @@ def get_trigger_word(lorajson: list[dict]):
 
 # https://huggingface.co/docs/diffusers/v0.23.1/en/api/loaders#diffusers.loaders.LoraLoaderMixin.fuse_lora
 # https://github.com/huggingface/diffusers/issues/4919
-def fuse_loras(pipe, lorajson: list[dict]):
+def fuse_loras(pipe, lorajson: list[dict], progress=gr.Progress(track_tqdm=True)):
     if not lorajson or not isinstance(lorajson, list): return
+    progress(0, desc="Fusing LoRA.")
     a_list = []
     w_list = []
     for d in lorajson:
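
Note: fuse_loras gains the same gr.Progress(track_tqdm=True) default parameter trick as preprocess_image. A sketch of the shape of the function after the change; the "name"/"scale" keys are hypothetical stand-ins for the repo's actual LoRA JSON layout:

    import gradio as gr

    def fuse_loras_sketch(pipe, lorajson: list[dict], progress=gr.Progress(track_tqdm=True)):
        # The helper reports status without every caller having to
        # thread a progress object through explicitly.
        if not lorajson or not isinstance(lorajson, list):
            return
        progress(0, desc="Fusing LoRA.")
        a_list: list[str] = []    # adapter names
        w_list: list[float] = []  # adapter weights
        for d in lorajson:
            # Hypothetical keys; the real layout lives in the repo's helpers.
            if d and d.get("name"):
                a_list.append(d["name"])
                w_list.append(float(d.get("scale", 1.0)))
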
 
requirements.txt CHANGED
@@ -4,7 +4,7 @@ torchvision
 huggingface_hub
 accelerate
 git+https://github.com/huggingface/diffusers
-git+https://github.com/huggingface/transformers
+transformers
 peft
 sentencepiece
 timm
 
  timm