import os
import shlex
import subprocess

import cv2
import gradio as gr
import spaces
import torch

from src.demo.model import DesignEdit

# Download the EfficientSAM ViT-S checkpoint (used for interactive mask extraction)
# unless a local copy already exists.
os.makedirs('models', exist_ok=True)
if not os.path.exists('models/efficient_sam_vits.pt'):
    subprocess.run(shlex.split('wget https://huggingface.co/Adapter/DragonDiffusion/resolve/main/model/efficient_sam_vits.pt -O models/efficient_sam_vits.pt'), check=True)

from src.demo.demo import (create_demo_remove, create_demo_zooming, create_demo_panning,
                           create_demo_moving, create_demo_layer, create_demo_mask_box)
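# Instantiate the shared DesignEdit editor on top of Stable Diffusion XL base 1.0;
# every demo tab below calls into this single instance.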
pretrained_model_path = "stabilityai/stable-diffusion-xl-base-1.0"
model = DesignEdit(pretrained_model_path=pretrained_model_path)
DESCRIPTION_1 = """
🌿DesignEdit🌿
"""
DESCRIPTION_2 = """
Multi-Layered Latent Decomposition and Fusion for Unified & Accurate Image Editing
"""
DESCRIPTION_3 = """
"""
with gr.Blocks(css='style.css') as demo:
    gr.HTML(DESCRIPTION_1)
    gr.HTML(DESCRIPTION_2)
    gr.HTML(DESCRIPTION_3)
    with gr.Tabs():
        with gr.TabItem('1️⃣ Object Removal'):
            create_demo_remove(model.run_remove)
        with gr.TabItem('2️⃣ Zooming Out'):
            create_demo_zooming(model.run_zooming)
        with gr.TabItem('3️⃣ Camera Panning'):
            create_demo_panning(model.run_panning)
        with gr.TabItem('4️⃣ Object Moving, Resizing and Flipping'):
            create_demo_moving(model.run_moving)
        with gr.TabItem('5️⃣ 🚩 Multi-Layered Editing 🚩'):
            create_demo_layer(model.run_layer)
        with gr.TabItem('🔧 Mask Preparation: Draw or Sketch'):
            create_demo_mask_box(model.run_mask)
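# Queue incoming requests and launch the server, listening on all interfaces.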
demo.queue(max_size=20)
demo.launch(max_threads=3, server_name="0.0.0.0")