# HiDiffusion / app.py
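# Gradio demo: a Stable Diffusion 1.5 pipeline (Realistic Vision V5.1) patched with
# HiDiffusion to generate 1024x1024 images, run on Spaces GPU hardware via @spaces.GPU.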
import torch
import spaces
import gradio as gr
from hidiffusion import apply_hidiffusion, remove_hidiffusion
from diffusers import DiffusionPipeline, DDIMScheduler, AutoencoderKL
from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
from transformers import CLIPFeatureExtractor
# Fine-tuned MSE VAE (loaded in fp16 to match the pipeline), plus the standard
# Stable Diffusion safety checker and its CLIP feature extractor.
vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
safety_checker = StableDiffusionSafetyChecker.from_pretrained("CompVis/stable-diffusion-safety-checker")
feature_extractor = CLIPFeatureExtractor.from_pretrained("openai/clip-vit-base-patch32")
pretrain_model = "SG161222/Realistic_Vision_V5.1_noVAE"
scheduler = DDIMScheduler.from_pretrained(pretrain_model, subfolder="scheduler")
pipe = DiffusionPipeline.from_pretrained(
    pretrain_model, scheduler=scheduler, safety_checker=safety_checker,
    feature_extractor=feature_extractor, vae=vae, torch_dtype=torch.float16,
).to("cuda")
# Optional: enable_xformers_memory_efficient_attention can reduce memory usage and speed up
# inference; enable_model_cpu_offload and enable_vae_tiling can reduce memory usage further.
# pipe.enable_model_cpu_offload()
# pipe.enable_vae_tiling()
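# If the optional xformers package is installed, memory-efficient attention can also be enabled:
# pipe.enable_xformers_memory_efficient_attention()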
# Apply hidiffusion with a single line of code.
apply_hidiffusion(pipe)
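# (remove_hidiffusion(pipe), imported above, can be used to undo the patch if needed.)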
@spaces.GPU
def run_hidiffusion(prompt, negative_prompt):
    # HiDiffusion lets this SD 1.5 pipeline generate directly at 1024x1024.
    return pipe(
        prompt, negative_prompt=negative_prompt, guidance_scale=7.5,
        height=1024, width=1024, eta=1.0,
    ).images[0]

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    negative_prompt = gr.Textbox(label="Negative prompt")
    btn = gr.Button("Run")
    output = gr.Image()
    btn.click(fn=run_hidiffusion, inputs=[prompt, negative_prompt], outputs=[output])

demo.launch()