from diffusers import LDMPipeline
import torch
import gradio as gr
import random

# Load the unconditional latent diffusion pipeline trained on CelebA-HQ (256x256 faces)
pipeline = LDMPipeline.from_pretrained("CompVis/ldm-celebahq-256")

def predict(steps, seed):
    # Seed the generator so the same seed reproduces the same face
    generator = torch.manual_seed(seed)
    # Yield an image for each increasing step count so Gradio streams the progression;
    # the range includes `steps` so the final image uses the requested number of inference steps
    for i in range(1, steps + 1):
        yield pipeline(generator=generator, num_inference_steps=i)["sample"][0]

random_seed = random.randint(0, 2147483647)

gr.Interface(
    predict,
    inputs=[
        gr.inputs.Slider(1, 100, label='Inference Steps', default=5, step=1),
        gr.inputs.Slider(0, 2147483647, label='Seed', default=random_seed, step=1),
    ],
    outputs=gr.Image(shape=[256, 256], type="pil", elem_id="output_image"),
    css="#output_image{width: 256px}",
    title="ldm-celebahq-256 - 🧨 diffusers library",
    description=(
        "This Space runs an unconditional Latent Diffusion process with the "
        "ldm-celebahq-256 face generator model by CompVis, using the diffusers library. "
        "The goal of this demo is to showcase the capabilities of the diffusers library. "
        "If you want the state-of-the-art experience with Latent Diffusion text-to-image, "
        "check out the main Space."
    ),
).queue().launch()  # queue() is required so the images yielded by predict() stream to the UI