Files changed (1)
  1. app.py +28 -0
app.py ADDED
@@ -0,0 +1,28 @@
+ from diffusers import LDMPipeline
+ import torch
+ import PIL.Image
+ import gradio as gr
+ import random
+ import numpy as np
+
+ # Unconditional latent diffusion pipeline trained on CelebA-HQ 256x256 faces
+ pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
+
+ def predict(steps, seed):
+     generator = torch.manual_seed(seed)
+     # Yield an image at every step count up to `steps`, so the UI shows the sample refining
+     for i in range(1, steps + 1):
+         yield pipeline(generator=generator, num_inference_steps=i)["sample"][0]
+
+ random_seed = random.randint(0, 2147483647)
+ gr.Interface(
+     predict,
+     inputs=[
+         gr.Slider(1, 100, label='Inference Steps', value=5, step=1),
+         gr.Slider(0, 2147483647, label='Seed', value=random_seed, step=1),
+     ],
+     outputs=gr.Image(shape=[256, 256], type="pil", elem_id="output_image"),
+     css="#output_image{width: 256px}",
+     title="ldm-celebahq-256 - 🧨 diffusers library",
+     description="This Space contains an unconditional Latent Diffusion process for the <a href=\"https://huggingface.co/CompVis/ldm-celebahq-256\">ldm-celebahq-256</a> face generator model by <a href=\"https://huggingface.co/CompVis\">CompVis</a>, using the <a href=\"https://github.com/huggingface/diffusers\">diffusers library</a>. The goal of this demo is to showcase the diffusers library's capabilities. If you want the state-of-the-art experience with Latent Diffusion text-to-image, check out the <a href=\"https://huggingface.co/spaces/multimodalart/latentdiffusion\">main Space</a>.",
+ ).queue().launch()  # queue() is required to stream the generator's intermediate images
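
For reference, a minimal sketch of exercising the same pipeline outside the Gradio UI, useful for checking the model download and a single render locally. It assumes the diffusers API used in app.py, where the pipeline output is indexed with "sample"; newer diffusers releases expose the result as .images instead.

# Minimal local sketch, not part of the commit. Assumes the same
# diffusers-era API as app.py ("sample" output key); on newer releases,
# replace ["sample"][0] with .images[0].
from diffusers import LDMPipeline
import torch

pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")
generator = torch.manual_seed(42)  # fixed seed so the sample is reproducible
image = pipeline(generator=generator, num_inference_steps=50)["sample"][0]
image.save("face.png")  # 256x256 PIL image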