barreloflube committed
Commit 3237809
1 Parent(s): 0a8b4a2
Refactor UI structure and add interactive image tab
app.py
CHANGED
@@ -397,7 +397,6 @@ def cleanup(pipeline, loras = None, embeddings = None):
 
 
 # Gen function
-@spaces.GPU
 def gen_img(
     request: SDReq | SDImg2ImgReq | SDInpaintReq
 ):
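
Note on the hunk above: the ZeroGPU decorator is moved rather than dropped. The second hunk below re-adds @spaces.GPU on the new generate_image callback, so the GPU is requested once per UI action and gen_img runs inside that allocation. A minimal sketch of the pattern, with stub bodies standing in for the real app.py code:

# Minimal sketch of the decorator move (stub bodies; not the real app.py code).
import spaces  # HuggingFace Spaces ZeroGPU package, assumed already imported in app.py

def gen_img(request):
    # Undecorated helper: runs inside whatever GPU allocation the caller holds.
    return f"generated for {request!r}"

@spaces.GPU  # GPU is allocated only for the duration of this UI callback.
def generate_image(prompt):
    request = {"prompt": prompt}  # stand-in for the SDReq built from the UI inputs
    return gen_img(request)
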
@@ -503,6 +502,298 @@ with open("data/images/loras/flux.json", "r") as f:
     loras = json.load(f)
 
 
+# Event functions
+def update_fast_generation(model, fast_generation):
+    if fast_generation:
+        return (
+            gr.update(
+                value=3.5
+            ),
+            gr.update(
+                value=8
+            )
+        )
+
+
+def selected_lora_from_gallery(evt: gr.SelectData):
+    return (
+        gr.update(
+            value=evt.index
+        )
+    )
+
+
+def update_selected_lora(custom_lora):
+    link = custom_lora.split("/")
+
+    if len(link) == 2:
+        model_card = ModelCard.load(custom_lora)
+        trigger_word = model_card.data.get("instance_prompt", "")
+        image_url = f"""https://huggingface.co/{custom_lora}/resolve/main/{model_card.data.get("widget", [{}])[0].get("output", {}).get("url", None)}"""
+
+        custom_lora_info_css = """
+        <style>
+        .custom-lora-info {
+            font-family: -apple-system, BlinkMacSystemFont, 'Segoe UI', 'Roboto', 'Oxygen', 'Ubuntu', 'Cantarell', 'Fira Sans', 'Droid Sans', 'Helvetica Neue', sans-serif;
+            background: linear-gradient(135deg, #4a90e2, #7b61ff);
+            color: white;
+            padding: 16px;
+            border-radius: 8px;
+            box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
+            margin: 16px 0;
+        }
+        .custom-lora-header {
+            font-size: 18px;
+            font-weight: 600;
+            margin-bottom: 12px;
+        }
+        .custom-lora-content {
+            display: flex;
+            align-items: center;
+            background-color: rgba(255, 255, 255, 0.1);
+            border-radius: 6px;
+            padding: 12px;
+        }
+        .custom-lora-image {
+            width: 80px;
+            height: 80px;
+            object-fit: cover;
+            border-radius: 6px;
+            margin-right: 16px;
+        }
+        .custom-lora-text h3 {
+            margin: 0 0 8px 0;
+            font-size: 16px;
+            font-weight: 600;
+        }
+        .custom-lora-text small {
+            font-size: 14px;
+            opacity: 0.9;
+        }
+        .custom-trigger-word {
+            background-color: rgba(255, 255, 255, 0.2);
+            padding: 2px 6px;
+            border-radius: 4px;
+            font-weight: 600;
+        }
+        </style>
+        """
+
+        custom_lora_info_html = f"""
+        <div class="custom-lora-info">
+            <div class="custom-lora-header">Custom LoRA: {custom_lora}</div>
+            <div class="custom-lora-content">
+                <img class="custom-lora-image" src="{image_url}" alt="LoRA preview">
+                <div class="custom-lora-text">
+                    <h3>{link[1].replace("-", " ").replace("_", " ")}</h3>
+                    <small>{"Using: <span class='custom-trigger-word'>"+trigger_word+"</span> as the trigger word" if trigger_word else "No trigger word found. If there's a trigger word, include it in your prompt"}</small>
+                </div>
+            </div>
+        </div>
+        """
+
+        custom_lora_info_html = f"{custom_lora_info_css}{custom_lora_info_html}"
+
+        return (
+            gr.update( # selected_lora
+                value=custom_lora,
+            ),
+            gr.update( # custom_lora_info
+                value=custom_lora_info_html,
+                visible=True
+            )
+        )
+
+    else:
+        return (
+            gr.update( # selected_lora
+                value=custom_lora,
+            ),
+            gr.update( # custom_lora_info
+                value=custom_lora_info_html if len(link) == 0 else "",
+                visible=False
+            )
+        )
+
+
+def add_to_enabled_loras(model, selected_lora, enabled_loras):
+    lora_data = loras
+    try:
+        selected_lora = int(selected_lora)
+
+        if 0 <= selected_lora: # is the index of the lora in the gallery
+            lora_info = lora_data[selected_lora]
+            enabled_loras.append({
+                "repo_id": lora_info["repo"],
+                "trigger_word": lora_info["trigger_word"]
+            })
+    except ValueError:
+        link = selected_lora.split("/")
+        if len(link) == 2:
+            model_card = ModelCard.load(selected_lora)
+            trigger_word = model_card.data.get("instance_prompt", "")
+            enabled_loras.append({
+                "repo_id": selected_lora,
+                "trigger_word": trigger_word
+            })
+
+    return (
+        gr.update( # selected_lora
+            value=""
+        ),
+        gr.update( # custom_lora_info
+            value="",
+            visible=False
+        ),
+        gr.update( # enabled_loras
+            value=enabled_loras
+        )
+    )
+
+
+def update_lora_sliders(enabled_loras):
+    sliders = []
+    remove_buttons = []
+
+    for lora in enabled_loras:
+        sliders.append(
+            gr.update(
+                label=lora.get("repo_id", ""),
+                info=f"Trigger Word: {lora.get('trigger_word', '')}",
+                visible=True,
+                interactive=True
+            )
+        )
+        remove_buttons.append(
+            gr.update(
+                visible=True,
+                interactive=True
+            )
+        )
+
+    if len(sliders) < 6:
+        for i in range(len(sliders), 6):
+            sliders.append(
+                gr.update(
+                    visible=False
+                )
+            )
+            remove_buttons.append(
+                gr.update(
+                    visible=False
+                )
+            )
+
+    return *sliders, *remove_buttons
+
+
+def remove_from_enabled_loras(enabled_loras, index):
+    enabled_loras.pop(index)
+    return (
+        gr.update(
+            value=enabled_loras
+        )
+    )
+
+
+@spaces.GPU
+def generate_image(
+    model, prompt, negative_prompt, fast_generation, enabled_loras,
+    lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5,
+    img2img_image, inpaint_image, canny_image, pose_image, depth_image,
+    img2img_strength, inpaint_strength, canny_strength, pose_strength, depth_strength,
+    resize_mode,
+    scheduler, image_height, image_width, image_num_images_per_prompt,
+    image_num_inference_steps, image_guidance_scale, image_seed,
+    refiner, vae
+):
+    base_args = {
+        "model": model,
+        "prompt": prompt,
+        "negative_prompt": negative_prompt,
+        "fast_generation": fast_generation,
+        "loras": None,
+        "resize_mode": resize_mode,
+        "scheduler": scheduler,
+        "height": int(image_height),
+        "width": int(image_width),
+        "num_images_per_prompt": float(image_num_images_per_prompt),
+        "num_inference_steps": float(image_num_inference_steps),
+        "guidance_scale": float(image_guidance_scale),
+        "seed": int(image_seed),
+        "refiner": refiner,
+        "vae": vae,
+        "controlnet_config": None,
+    }
+    base_args = SDReq(**base_args)
+
+    if len(enabled_loras) > 0:
+        base_args.loras = []
+        for enabled_lora, lora_slider in zip(enabled_loras, [lora_slider_0, lora_slider_1, lora_slider_2, lora_slider_3, lora_slider_4, lora_slider_5]):
+            if enabled_lora.get("repo_id", None):
+                base_args.loras.append(
+                    {
+                        "repo_id": enabled_lora["repo_id"],
+                        "weight": lora_slider
+                    }
+                )
+
+    image = None
+    mask_image = None
+    strength = None
+
+    if img2img_image:
+        image = img2img_image
+        strength = float(img2img_strength)
+
+        base_args = SDImg2ImgReq(
+            **base_args.__dict__,
+            image=image,
+            strength=strength
+        )
+    elif inpaint_image:
+        image = inpaint_image['background'] if not all(pixel == (0, 0, 0) for pixel in list(inpaint_image['background'].getdata())) else None
+        mask_image = inpaint_image['layers'][0] if image else None
+        strength = float(inpaint_strength)
+
+        base_args = SDInpaintReq(
+            **base_args.__dict__,
+            image=image,
+            mask_image=mask_image,
+            strength=strength
+        )
+    elif any([canny_image, pose_image, depth_image]):
+        base_args.controlnet_config = ControlNetReq(
+            controlnets=[],
+            control_images=[],
+            controlnet_conditioning_scale=[]
+        )
+
+        if canny_image:
+            base_args.controlnet_config.controlnets.append("canny_fl")
+            base_args.controlnet_config.control_images.append(canny_image)
+            base_args.controlnet_config.controlnet_conditioning_scale.append(float(canny_strength))
+        if pose_image:
+            base_args.controlnet_config.controlnets.append("pose_fl")
+            base_args.controlnet_config.control_images.append(pose_image)
+            base_args.controlnet_config.controlnet_conditioning_scale.append(float(pose_strength))
+        if depth_image:
+            base_args.controlnet_config.controlnets.append("depth_fl")
+            base_args.controlnet_config.control_images.append(depth_image)
+            base_args.controlnet_config.controlnet_conditioning_scale.append(float(depth_strength))
+    else:
+        base_args = SDReq(**base_args.__dict__)
+
+    images = gen_img(base_args)
+
+    return (
+        gr.update(
+            value=images,
+            interactive=True
+        )
+    )
+
+
 # Main Gradio app
 with gr.Blocks(theme=gr.themes.Soft(), css=css) as demo:
     # Header
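
The handlers above are only defined in this hunk; their wiring to components happens elsewhere in the gr.Blocks block and is not shown here. A hedged sketch of how two of them might be connected, where every component name (model, fast_generation, custom_lora, ...) is an assumption rather than something taken from this diff:

# Hypothetical wiring sketch (not in the diff): component names are assumptions,
# and the handlers are the ones this commit adds to app.py.
import gradio as gr

with gr.Blocks() as demo:
    model = gr.Dropdown(["flux.1-dev"], label="Model")        # assumed component
    fast_generation = gr.Checkbox(label="Fast generation")    # assumed component
    guidance_scale = gr.Slider(0, 20, value=3.5, label="Guidance scale")
    num_steps = gr.Slider(1, 50, value=8, label="Steps")
    custom_lora = gr.Textbox(label="Custom LoRA (user/repo)")
    selected_lora = gr.Textbox(visible=False)
    custom_lora_info = gr.HTML(visible=False)

    # Toggling fast generation retunes guidance scale and step count.
    fast_generation.change(
        update_fast_generation,
        inputs=[model, fast_generation],
        outputs=[guidance_scale, num_steps],
    )
    # Entering a "user/repo" id previews the custom LoRA card.
    custom_lora.change(
        update_selected_lora,
        inputs=[custom_lora],
        outputs=[selected_lora, custom_lora_info],
    )

The real app presumably also wires selected_lora_from_gallery to a gallery .select event and generate_image to a button .click, following the same pattern.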