import os

import gradio as gr
import torch
from PIL import Image
from diffusers import QwenImageEditPlusPipeline
# 4-bit quantized build of Qwen-Image-Edit-2509
model_path = "ovedrive/Qwen-Image-Edit-2509-4bit"

pipeline = QwenImageEditPlusPipeline.from_pretrained(model_path, torch_dtype=torch.bfloat16)
print("pipeline loaded")
pipeline.set_progress_bar_config(disable=None)
# Moving the whole pipeline onto the GPU needs roughly 20 GB of VRAM.
pipeline.to("cuda")
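# Low-VRAM alternative (a sketch, assuming the `accelerate` package is
# installed): replace pipeline.to("cuda") above with CPU offload, which keeps
# weights in system RAM and streams each submodule to the GPU on demand:
#
#     pipeline.enable_model_cpu_offload()
#
# This trades inference speed for a much smaller VRAM footprint.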
def generate(image: Image.Image, prompt: str):
    inputs = {
        "image": image,
        "prompt": prompt,
        "generator": torch.manual_seed(0),  # fixed seed, so identical inputs give identical outputs
        "true_cfg_scale": 4.0,
        "negative_prompt": " ",
        "num_inference_steps": 20,  # even 10 steps should be enough in many cases
    }
    with torch.inference_mode():
        output = pipeline(**inputs)
    output_image = output.images[0]
    output_image.save("output_image_edit_plus.png")
    print("image saved at", os.path.abspath("output_image_edit_plus.png"))
    return os.path.abspath("output_image_edit_plus.png")
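# Quick smoke test without the UI (the input file and prompt here are
# hypothetical):
#
#     edited = generate(Image.open("input.png").convert("RGB"), "make the sky purple")
#     print(edited)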
with gr.Blocks(title="Qwen Image Edit") as demo:
    image = gr.Image(type="pil", label="Input image")
    prompt = gr.Textbox(label="Edit prompt", placeholder="Describe the desired edit...")
    run = gr.Button("Generate")
    out = gr.Image(label="Output")
    run.click(fn=generate, inputs=[image, prompt], outputs=[out])
if __name__ == "__main__":
    demo.queue().launch(
        server_name=os.getenv("GRADIO_SERVER_NAME", "0.0.0.0"),
        server_port=int(os.getenv("GRADIO_SERVER_PORT", "7861")),
        show_error=True,
    )
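# Example invocation overriding the host/port via the environment variables
# that launch() reads above (the values here are illustrative):
#
#     GRADIO_SERVER_NAME=127.0.0.1 GRADIO_SERVER_PORT=7860 python app.py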