# Pixart-Sigma / app.py
import gradio as gr
import spaces
import torch
from diffusers import Transformer2DModel
from scripts.diffusers_patches import pixart_sigma_init_patched_inputs, PixArtSigmaPipeline
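
# Verify that the installed diffusers build exposes the _init_patched_inputs hook,
# then swap in the PixArt-Sigma variant shipped in the local scripts/ package.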
assert getattr(Transformer2DModel, '_init_patched_inputs', False), "Need to Upgrade diffusers: pip install git+https://github.com/huggingface/diffusers"
setattr(Transformer2DModel, '_init_patched_inputs', pixart_sigma_init_patched_inputs)
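
# Prefer the first CUDA device with fp16 weights; fall back to CPU if no GPU is available.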
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
weight_dtype = torch.float16
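
# Load the PixArt-Sigma 1024px transformer checkpoint in half precision.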
transformer = Transformer2DModel.from_pretrained(
    "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS",
    subfolder='transformer',
    torch_dtype=weight_dtype,
    use_safetensors=True,
)
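
# Assemble the full text-to-image pipeline (SDXL VAE + T5 text encoder, per the
# checkpoint name) around the patched transformer, then move it to the target device.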
pipe = PixArtSigmaPipeline.from_pretrained(
    "PixArt-alpha/pixart_sigma_sdxlvae_T5_diffusers",
    transformer=transformer,
    torch_dtype=weight_dtype,
    use_safetensors=True,
)
pipe.to(device)
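
# On ZeroGPU Spaces, this decorator requests a GPU for each call (for up to 120 seconds).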
@spaces.GPU(duration=120)
def generate(prompt, negative_prompt, num_inference_steps, guidance_scale, height, width):
    image = pipe(
        prompt,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
        guidance_scale=guidance_scale,
        height=height,
        width=width
    ).images[0]
    return image
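
# Minimal Gradio front end: prompt / negative prompt text boxes plus sliders for
# steps, guidance scale, and output resolution.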
interface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Text(label="Prompt"),
        gr.Text(label="Negative Prompt"),
        gr.Slider(minimum=1, maximum=500, value=100, step=1, label="Number of Inference Steps"),
        gr.Slider(minimum=1, maximum=20, value=4.5, step=0.1, label="Guidance Scale"),
        gr.Slider(minimum=64, maximum=1024, value=512, step=64, label="Height"),
        gr.Slider(minimum=64, maximum=1024, value=512, step=64, label="Width"),
    ],
    outputs=gr.Image(label="Generated Image"),
    title="PixArt Sigma Image Generation",
    description="Generate images using the PixArt Sigma model.",
)
interface.launch()