File size: 1,181 Bytes
5f8a996
22595de
fea8c5b
5f8a996
 
 
095368e
af68b3c
095368e
 
5f8a996
 
 
 
 
 
 
51a097e
c76773c
d3f0c38
51a097e
5f8a996
 
 
 
 
 
 
 
eb97cd1
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
from hidiffusion import apply_hidiffusion, remove_hidiffusion
from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
import gradio as gr
import torch
import spaces

# SDXL base checkpoint id on the Hugging Face Hub.
model = "stabilityai/stable-diffusion-xl-base-1.0"
# fp16-safe VAE replacement: the stock SDXL VAE produces NaNs in float16,
# so the madebyollin fixed VAE is loaded instead.
vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
# DDIM scheduler taken from the same checkpoint's scheduler subfolder.
scheduler = DDIMScheduler.from_pretrained(model, subfolder="scheduler")
# Full fp16 pipeline moved to the GPU; safetensors weights, fp16 variant.
pipe = DiffusionPipeline.from_pretrained(model, vae=vae, scheduler=scheduler, torch_dtype=torch.float16, use_safetensors=True, variant="fp16").to("cuda")

# Optional memory-saving toggles, intentionally disabled (pipeline lives fully on GPU).
#pipe.enable_model_cpu_offload()
#pipe.enable_vae_tiling()

# Apply hidiffusion with a single line of code.
apply_hidiffusion(pipe)

@spaces.GPU
def run_hidiffusion(
    prompt,
    negative_prompt,
    progress=gr.Progress(track_tqdm=True),
    *,
    height=2048,
    width=2048,
    guidance_scale=7.5,
    num_inference_steps=25,
):
    """Generate one image from the HiDiffusion-patched SDXL pipeline.

    Args:
        prompt: Text prompt describing the desired image.
        negative_prompt: Text describing what to avoid in the image.
        progress: Gradio progress tracker; mirrors the pipeline's tqdm bars
            into the UI (Gradio's sanctioned default-argument pattern).
        height, width: Output resolution in pixels (default 2048x2048,
            HiDiffusion's high-resolution target for SDXL).
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of DDIM denoising steps.

    Returns:
        The first generated PIL image from the pipeline output.
    """
    # eta=1.0 makes DDIM fully stochastic (DDPM-like sampling).
    return pipe(
        prompt,
        guidance_scale=guidance_scale,
        height=height,
        width=width,
        eta=1.0,
        negative_prompt=negative_prompt,
        num_inference_steps=num_inference_steps,
    ).images[0]
        
# Minimal UI: two text inputs, a trigger button, and an image output.
with gr.Blocks() as demo:
    prompt_box = gr.Textbox()
    negative_box = gr.Textbox()
    run_button = gr.Button("Run")
    result_image = gr.Image()

    # Wire the button to the GPU-backed generation function.
    run_button.click(
        fn=run_hidiffusion,
        inputs=[prompt_box, negative_box],
        outputs=[result_image],
    )

demo.launch()