Dilshan Irugalbandara committed
Commit 5bbca05 · 1 Parent(s): 7c3ef53

API Update

Files changed (1):
  1. app.py +2 -387

app.py CHANGED
@@ -136,30 +136,6 @@ automasker = AutoMasker(
 )
 
 
-# Flux-based CatVTON
-# access_token = os.getenv("HUGGING_FACE_HUB_TOKEN")
-# flux_repo = "black-forest-labs/FLUX.1-Fill-dev"
-# pipeline_flux = FluxTryOnPipeline.from_pretrained(flux_repo, use_auth_token=access_token)
-# pipeline_flux.load_lora_weights(
-#     os.path.join(repo_path, "flux-lora"),
-#     weight_name='pytorch_lora_weights.safetensors'
-# )
-# pipeline_flux.to("cuda", init_weight_dtype(args.mixed_precision))
-
-
-# Mask-free CatVTON
-# catvton_mf_repo = "zhengchong/CatVTON-MaskFree"
-# repo_path_mf = snapshot_download(repo_id=catvton_mf_repo, use_auth_token=access_token)
-# pipeline_p2p = CatVTONPix2PixPipeline(
-#     base_ckpt=args.p2p_base_model_path,
-#     attn_ckpt=repo_path_mf,
-#     attn_ckpt_version="mix-48k-1024",
-#     weight_dtype=init_weight_dtype(args.mixed_precision),
-#     use_tf32=args.allow_tf32,
-#     device='cuda'
-# )
-
-
 @spaces.GPU(duration=120)
 def submit_function(
     person_image,
@@ -170,8 +146,8 @@ def submit_function(
     seed,
     show_type
 ):
-    person_image, mask = person_image["background"], person_image["layers"][0]
-    mask = Image.open(mask).convert("L")
+    person_image = person_image if isinstance(person_image, str) else person_image["background"]
+    mask = None # Ignore the mask if not provided
     if len(np.unique(np.array(mask))) == 1:
         mask = None
     else:
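
Note on this hunk: submit_function now accepts either a plain filepath string (what API callers send) or the Gradio ImageEditor dict (what the UI sends), and it no longer reads a user-drawn mask layer. A minimal sketch of the added logic, assuming only numpy as already imported in app.py; the helper name normalize_person_input is hypothetical and simply mirrors the two added lines:

    from typing import Union

    import numpy as np


    def normalize_person_input(person_image: Union[str, dict]) -> str:
        # UI callers send {"background": path, "layers": [...]}; API callers send a bare path.
        return person_image if isinstance(person_image, str) else person_image["background"]


    # With mask forced to None, np.array(None) is a 0-d object array, so the
    # np.unique check always sees a single value and keeps mask as None; the
    # unchanged remainder of submit_function then falls back to the
    # AutoMasker-generated mask (the same pattern visible in the removed
    # submit_function_flux below).
    assert len(np.unique(np.array(None))) == 1
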
@@ -239,126 +215,6 @@ def submit_function(
         new_result_image.paste(result_image, (condition_width + 5, 0))
         return new_result_image
 
-# @spaces.GPU(duration=120)
-# def submit_function_p2p(
-#     person_image,
-#     cloth_image,
-#     num_inference_steps,
-#     guidance_scale,
-#     seed):
-#     person_image = person_image["background"]
-
-#     tmp_folder = args.output_dir
-#     date_str = datetime.now().strftime("%Y%m%d%H%M%S")
-#     result_save_path = os.path.join(tmp_folder, date_str[:8], date_str[8:] + ".png")
-#     if not os.path.exists(os.path.join(tmp_folder, date_str[:8])):
-#         os.makedirs(os.path.join(tmp_folder, date_str[:8]))
-
-#     generator = None
-#     if seed != -1:
-#         generator = torch.Generator(device='cuda').manual_seed(seed)
-
-#     person_image = Image.open(person_image).convert("RGB")
-#     cloth_image = Image.open(cloth_image).convert("RGB")
-#     person_image = resize_and_crop(person_image, (args.width, args.height))
-#     cloth_image = resize_and_padding(cloth_image, (args.width, args.height))
-
-#     # Inference
-#     try:
-#         result_image = pipeline_p2p(
-#             image=person_image,
-#             condition_image=cloth_image,
-#             num_inference_steps=num_inference_steps,
-#             guidance_scale=guidance_scale,
-#             generator=generator
-#         )[0]
-#     except Exception as e:
-#         raise gr.Error(
-#             "An error occurred. Please try again later: {}".format(e)
-#         )
-
-#     # Post-process
-#     save_result_image = image_grid([person_image, cloth_image, result_image], 1, 3)
-#     save_result_image.save(result_save_path)
-#     return result_image
-
-# @spaces.GPU(duration=120)
-# def submit_function_flux(
-#     person_image,
-#     cloth_image,
-#     cloth_type,
-#     num_inference_steps,
-#     guidance_scale,
-#     seed,
-#     show_type
-# ):
-
-#     # Process image editor input
-#     person_image, mask = person_image["background"], person_image["layers"][0]
-#     mask = Image.open(mask).convert("L")
-#     if len(np.unique(np.array(mask))) == 1:
-#         mask = None
-#     else:
-#         mask = np.array(mask)
-#         mask[mask > 0] = 255
-#         mask = Image.fromarray(mask)
-
-#     # Set random seed
-#     generator = None
-#     if seed != -1:
-#         generator = torch.Generator(device='cuda').manual_seed(seed)
-
-#     # Process input images
-#     person_image = Image.open(person_image).convert("RGB")
-#     cloth_image = Image.open(cloth_image).convert("RGB")
-
-#     # Adjust image sizes
-#     person_image = resize_and_crop(person_image, (args.width, args.height))
-#     cloth_image = resize_and_padding(cloth_image, (args.width, args.height))
-
-#     # Process mask
-#     if mask is not None:
-#         mask = resize_and_crop(mask, (args.width, args.height))
-#     else:
-#         mask = automasker(
-#             person_image,
-#             cloth_type
-#         )['mask']
-#     mask = mask_processor.blur(mask, blur_factor=9)
-
-#     # Inference
-#     result_image = pipeline_flux(
-#         image=person_image,
-#         condition_image=cloth_image,
-#         mask_image=mask,
-#         width=args.width,
-#         height=args.height,
-#         num_inference_steps=num_inference_steps,
-#         guidance_scale=guidance_scale,
-#         generator=generator
-#     ).images[0]
-
-#     # Post-processing
-#     masked_person = vis_mask(person_image, mask)
-
-#     # Return result based on show type
-#     if show_type == "result only":
-#         return result_image
-#     else:
-#         width, height = person_image.size
-#         if show_type == "input & result":
-#             condition_width = width // 2
-#             conditions = image_grid([person_image, cloth_image], 2, 1)
-#         else:
-#             condition_width = width // 3
-#             conditions = image_grid([person_image, masked_person, cloth_image], 3, 1)
-
-#         conditions = conditions.resize((condition_width, height), Image.NEAREST)
-#         new_result_image = Image.new("RGB", (width + condition_width + 5, height))
-#         new_result_image.paste(conditions, (0, 0))
-#         new_result_image.paste(result_image, (condition_width + 5, 0))
-#         return new_result_image
-
 
 def person_example_fn(image_path):
     return image_path
@@ -502,247 +358,6 @@ def app_gradio():
                 ],
                 result_image,
             )
-
-#         with gr.Tab("Mask-based & Flux.1 Fill Dev"):
-#             with gr.Row():
-#                 with gr.Column(scale=1, min_width=350):
-#                     with gr.Row():
-#                         image_path_flux = gr.Image(
-#                             type="filepath",
-#                             interactive=True,
-#                             visible=False,
-#                         )
-#                         person_image_flux = gr.ImageEditor(
-#                             interactive=True, label="Person Image", type="filepath"
-#                         )
-
-#                     with gr.Row():
-#                         with gr.Column(scale=1, min_width=230):
-#                             cloth_image_flux = gr.Image(
-#                                 interactive=True, label="Condition Image", type="filepath"
-#                             )
-#                         with gr.Column(scale=1, min_width=120):
-#                             gr.Markdown(
-#                                 '<span style="color: #808080; font-size: small;">Two ways to provide Mask:<br>1. Upload the person image and use the `🖌️` above to draw the Mask (higher priority)<br>2. Select the `Try-On Cloth Type` to generate automatically </span>'
-#                             )
-#                             cloth_type = gr.Radio(
-#                                 label="Try-On Cloth Type",
-#                                 choices=["upper", "lower", "overall"],
-#                                 value="upper",
-#                             )
-
-#                     submit_flux = gr.Button("Submit")
-#                     gr.Markdown(
-#                         '<center><span style="color: #FF0000">!!! Click only Once, Wait for Delay !!!</span></center>'
-#                     )
-
-#                     with gr.Accordion("Advanced Options", open=False):
-#                         num_inference_steps_flux = gr.Slider(
-#                             label="Inference Step", minimum=10, maximum=100, step=5, value=50
-#                         )
-#                         # Guidence Scale
-#                         guidance_scale_flux = gr.Slider(
-#                             label="CFG Strenth", minimum=0.0, maximum=50, step=0.5, value=30
-#                         )
-#                         # Random Seed
-#                         seed_flux = gr.Slider(
-#                             label="Seed", minimum=-1, maximum=10000, step=1, value=42
-#                         )
-#                         show_type = gr.Radio(
-#                             label="Show Type",
-#                             choices=["result only", "input & result", "input & mask & result"],
-#                             value="input & mask & result",
-#                         )
-
-#                 with gr.Column(scale=2, min_width=500):
-#                     result_image_flux = gr.Image(interactive=False, label="Result")
-#             with gr.Row():
-#                 # Photo Examples
-#                 root_path = "resource/demo/example"
-#                 with gr.Column():
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "person", "men", _)
-#                             for _ in os.listdir(os.path.join(root_path, "person", "men"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=image_path_flux,
-#                         label="Person Examples ①",
-#                     )
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "person", "women", _)
-#                             for _ in os.listdir(os.path.join(root_path, "person", "women"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=image_path_flux,
-#                         label="Person Examples ②",
-#                     )
-#                     gr.Markdown(
-#                         '<span style="color: #808080; font-size: small;">*Person examples come from the demos of <a href="https://huggingface.co/spaces/levihsu/OOTDiffusion">OOTDiffusion</a> and <a href="https://www.outfitanyone.org">OutfitAnyone</a>. </span>'
-#                     )
-#                 with gr.Column():
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "condition", "upper", _)
-#                             for _ in os.listdir(os.path.join(root_path, "condition", "upper"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=cloth_image_flux,
-#                         label="Condition Upper Examples",
-#                     )
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "condition", "overall", _)
-#                             for _ in os.listdir(os.path.join(root_path, "condition", "overall"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=cloth_image_flux,
-#                         label="Condition Overall Examples",
-#                     )
-#                     condition_person_exm = gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "condition", "person", _)
-#                             for _ in os.listdir(os.path.join(root_path, "condition", "person"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=cloth_image_flux,
-#                         label="Condition Reference Person Examples",
-#                     )
-#                     gr.Markdown(
-#                         '<span style="color: #808080; font-size: small;">*Condition examples come from the Internet. </span>'
-#                     )
-
-
-#             image_path_flux.change(
-#                 person_example_fn, inputs=image_path_flux, outputs=person_image_flux
-#             )
-
-#             submit_flux.click(
-#                 submit_function_flux,
-#                 [person_image_flux, cloth_image_flux, cloth_type, num_inference_steps_flux, guidance_scale_flux, seed_flux, show_type],
-#                 result_image_flux,
-#             )
-
-
-#         with gr.Tab("Mask-free & SD1.5"):
-#             with gr.Row():
-#                 with gr.Column(scale=1, min_width=350):
-#                     with gr.Row():
-#                         image_path_p2p = gr.Image(
-#                             type="filepath",
-#                             interactive=True,
-#                             visible=False,
-#                         )
-#                         person_image_p2p = gr.ImageEditor(
-#                             interactive=True, label="Person Image", type="filepath"
-#                         )
-
-#                     with gr.Row():
-#                         with gr.Column(scale=1, min_width=230):
-#                             cloth_image_p2p = gr.Image(
-#                                 interactive=True, label="Condition Image", type="filepath"
-#                             )
-
-#                     submit_p2p = gr.Button("Submit")
-#                     gr.Markdown(
-#                         '<center><span style="color: #FF0000">!!! Click only Once, Wait for Delay !!!</span></center>'
-#                     )
-
-#                     gr.Markdown(
-#                         '<span style="color: #808080; font-size: small;">Advanced options can adjust details:<br>1. `Inference Step` may enhance details;<br>2. `CFG` is highly correlated with saturation;<br>3. `Random seed` may improve pseudo-shadow.</span>'
-#                     )
-#                     with gr.Accordion("Advanced Options", open=False):
-#                         num_inference_steps_p2p = gr.Slider(
-#                             label="Inference Step", minimum=10, maximum=100, step=5, value=50
-#                         )
-#                         # Guidence Scale
-#                         guidance_scale_p2p = gr.Slider(
-#                             label="CFG Strenth", minimum=0.0, maximum=7.5, step=0.5, value=2.5
-#                         )
-#                         # Random Seed
-#                         seed_p2p = gr.Slider(
-#                             label="Seed", minimum=-1, maximum=10000, step=1, value=42
-#                         )
-#                         # show_type = gr.Radio(
-#                         #     label="Show Type",
-#                         #     choices=["result only", "input & result", "input & mask & result"],
-#                         #     value="input & mask & result",
-#                         # )
-
-#                 with gr.Column(scale=2, min_width=500):
-#                     result_image_p2p = gr.Image(interactive=False, label="Result")
-#             with gr.Row():
-#                 # Photo Examples
-#                 root_path = "resource/demo/example"
-#                 with gr.Column():
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "person", "men", _)
-#                             for _ in os.listdir(os.path.join(root_path, "person", "men"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=image_path_p2p,
-#                         label="Person Examples ①",
-#                     )
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "person", "women", _)
-#                             for _ in os.listdir(os.path.join(root_path, "person", "women"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=image_path_p2p,
-#                         label="Person Examples ②",
-#                     )
-#                     gr.Markdown(
-#                         '<span style="color: #808080; font-size: small;">*Person examples come from the demos of <a href="https://huggingface.co/spaces/levihsu/OOTDiffusion">OOTDiffusion</a> and <a href="https://www.outfitanyone.org">OutfitAnyone</a>. </span>'
-#                     )
-#                 with gr.Column():
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "condition", "upper", _)
-#                             for _ in os.listdir(os.path.join(root_path, "condition", "upper"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=cloth_image_p2p,
-#                         label="Condition Upper Examples",
-#                     )
-#                     gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "condition", "overall", _)
-#                             for _ in os.listdir(os.path.join(root_path, "condition", "overall"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=cloth_image_p2p,
-#                         label="Condition Overall Examples",
-#                     )
-#                     condition_person_exm = gr.Examples(
-#                         examples=[
-#                             os.path.join(root_path, "condition", "person", _)
-#                             for _ in os.listdir(os.path.join(root_path, "condition", "person"))
-#                         ],
-#                         examples_per_page=4,
-#                         inputs=cloth_image_p2p,
-#                         label="Condition Reference Person Examples",
-#                     )
-#                     gr.Markdown(
-#                         '<span style="color: #808080; font-size: small;">*Condition examples come from the Internet. </span>'
-#                     )
-
-#             image_path_p2p.change(
-#                 person_example_fn, inputs=image_path_p2p, outputs=person_image_p2p
-#             )
-
-#             submit_p2p.click(
-#                 submit_function_p2p,
-#                 [
-#                     person_image_p2p,
-#                     cloth_image_p2p,
-#                     num_inference_steps_p2p,
-#                     guidance_scale_p2p,
-#                     seed_p2p],
-#                 result_image_p2p,
-#             )
 
     demo.queue().launch(share=True, show_error=True)
 
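With the handler accepting bare filepaths, the Space can now be driven programmatically, which is presumably the point of the "API Update". A minimal client-side sketch using gradio_client; the Space id, argument order, and api_name below are placeholders (assumptions, not part of this commit) — inspect the real endpoint with client.view_api() first:

    from gradio_client import Client, handle_file

    client = Client("user/space-id")  # placeholder Space id
    result = client.predict(
        handle_file("person.jpg"),    # person_image: a plain filepath now works
        handle_file("cloth.jpg"),     # cloth_image
        "upper",                      # cloth_type
        50,                           # num_inference_steps
        2.5,                          # guidance_scale
        42,                           # seed
        "result only",                # show_type
        api_name="/submit_function",  # placeholder endpoint name
    )
    print(result)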
 