williamberman committed on
Commit 43e2030
1 Parent(s): e550111

Create app.py

Files changed (1)
  1. app.py +42 -0
app.py ADDED
@@ -0,0 +1,42 @@
+ import gradio as gr
+ import torch
+ from diffusers import DiffusionPipeline
+
+ print(f"Is CUDA available: {torch.cuda.is_available()}")
+
+ if torch.cuda.is_available():
+     print(f"CUDA device: {torch.cuda.get_device_name(torch.cuda.current_device())}")
+     pipe_vq = DiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq", torch_dtype=torch.float16, revision="fp16").to("cuda")
+ else:
+     pipe_vq = DiffusionPipeline.from_pretrained("microsoft/vq-diffusion-ithq")
+
+ examples = [
+     ["An astronaut riding a horse."],
+     ["A teddy bear playing in the water."],
+     ["A simple wedding cake with lego bride and groom topper and cake pops."],
+     ["A realistic tree using a mixture of different colored pencils."],
+     ["Muscular Santa Claus."],
+     ["A man with a pineapple head."],
+     ["Pebble tower standing on the left on the sea beach."],
+ ]
+
+ title = "VQ Diffusion vs. Stable Diffusion 1-5"
+ description = "[VQ-Diffusion-ITHQ](https://huggingface.co/microsoft/vq-diffusion-ithq) for text to image generation."
+
+
+ def inference(text):
+     output_vq_diffusion = pipe_vq(text, truncation_rate=0.86).images[0]
+     return output_vq_diffusion
+
+
+ io = gr.Interface(
+     inference,
+     gr.Textbox(lines=3),
+     outputs=[
+         gr.Image(type="pil", label="VQ-Diffusion"),
+     ],
+     title=title,
+     description=description,
+     examples=examples
+ )
+ io.launch()
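
For reference, a minimal sketch of calling the VQ-Diffusion pipeline directly, outside the Gradio interface. The checkpoint id, fp16 revision, and truncation_rate mirror app.py above; the prompt and output filename are illustrative assumptions.

import torch
from diffusers import DiffusionPipeline

# Same checkpoint and fp16 settings as app.py (assumes a CUDA device is available).
pipe_vq = DiffusionPipeline.from_pretrained(
    "microsoft/vq-diffusion-ithq",
    torch_dtype=torch.float16,
    revision="fp16",
).to("cuda")

# truncation_rate matches the value used in the app's inference() function.
image = pipe_vq("An astronaut riding a horse.", truncation_rate=0.86).images[0]
image.save("vq_diffusion_sample.png")  # illustrative output path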