import gradio as gr
import torch
from diffusers import DiffusionPipeline

print(f"Is CUDA available: {torch.cuda.is_available()}")

# Load the VQ-Diffusion pipeline: fp16 weights on the GPU when CUDA is
# available, otherwise the default full-precision checkpoint on CPU.
if torch.cuda.is_available():
    print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
    pipe_vq = DiffusionPipeline.from_pretrained(
        "microsoft/vq-diffusion-ithq", torch_dtype=torch.float16, revision="fp16"
    ).to("cuda")
else:
    pipe_vq = DiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")

title = "VQ Diffusion"
description = "[VQ-Diffusion-ITHQ](https://huggingface.co/microsoft/vq-diffusion-ithq) for text-to-image generation."

def inference(text):
    # Generate a single image from the prompt; truncation_rate keeps only the
    # most probable latent classes (up to that cumulative probability) at each
    # denoising step.
    output_vq_diffusion = pipe_vq(text, truncation_rate=0.86).images[0]
    return output_vq_diffusion

io = gr.Interface(
    fn=inference,
    inputs=gr.Textbox(lines=3),
    outputs=gr.Image(type="pil", label="VQ-Diffusion"),
    title=title,
    description=description,
)
io.launch()
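
# A minimal sketch of calling the model directly, bypassing the web UI
# (the prompt and output filename are illustrative, not part of this Space):
#
#   image = inference("a teddy bear playing in the pool")  # returns a PIL image
#   image.save("vq_diffusion_sample.png")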