# image_generator/app.py
import gradio as gr
import jax
import jax.numpy as jnp
from diffusers import FlaxPNDMScheduler, FlaxStableDiffusionPipeline
from flax.jax_utils import replicate
from flax.training.common_utils import shard
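
# Half precision keeps the model weights small and fast on accelerators; the
# scheduler is patched back to float32 below to work around a known issue.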
DTYPE = jnp.float16
pipeline, pipeline_params = FlaxStableDiffusionPipeline.from_pretrained(
    "bguisard/stable-diffusion-nano-2-1",
    revision="flax",
    dtype=DTYPE,
)
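
# Flax pipelines return their weights as a separate params pytree rather than
# storing them on the module, so replacing the scheduler below means swapping
# both the module itself and its entry in the params pytree.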
if DTYPE != jnp.float32:
    # There is a known issue with schedulers when loading from a pretrained
    # pipeline: we need the schedulers to always use float32.
    # See: https://github.com/huggingface/diffusers/issues/2155
    scheduler, scheduler_params = FlaxPNDMScheduler.from_pretrained(
        pretrained_model_name_or_path="bguisard/stable-diffusion-nano-2-1",
        subfolder="scheduler",
        dtype=jnp.float32,
    )
    pipeline_params["scheduler"] = scheduler_params
    pipeline.scheduler = scheduler


def generate_image(prompt: str, inference_steps: int = 30, prng_seed: int = 0):
    # One PRNG key per device: with jit=True the pipeline is pmap-ed across
    # all available devices.
    rng = jax.random.PRNGKey(int(prng_seed))
    rng = jax.random.split(rng, jax.device_count())
    p_params = replicate(pipeline_params)
    num_samples = 1
    prompt_ids = pipeline.prepare_inputs([prompt] * num_samples)
    # shard() adds a leading device axis so each device receives its own slice.
    prompt_ids = shard(prompt_ids)
    images = pipeline(
        prompt_ids=prompt_ids,
        params=p_params,
        prng_seed=rng,
        height=128,
        width=128,
        num_inference_steps=int(inference_steps),
        jit=True,
    ).images
    # Collapse the (device, batch) axes into a flat batch of HWC arrays.
    images = images.reshape((num_samples,) + images.shape[-3:])
    images = pipeline.numpy_to_pil(images)
    return images[0]
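
# A minimal sketch of calling the generator directly, outside the UI; the
# prompt and output filename are illustrative, not part of the app:
#
#   image = generate_image("A watercolor painting of a bird", inference_steps=5)
#   image.save("sample.png")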

prompt_input = gr.Textbox(
    label="Prompt", placeholder="A watercolor painting of a bird"
)
inf_steps_input = gr.Slider(
    minimum=1, maximum=100, value=30, step=1, label="Inference Steps"
)
seed_input = gr.Number(value=0, label="Seed")
app = gr.Interface(
    fn=generate_image,
    inputs=[prompt_input, inf_steps_input, seed_input],
    outputs="image",
    title="🤗 Stable Diffusion Nano 🧨",
    description=(
        "Based on Stable Diffusion and fine-tuned on 128x128 images, "
        "[Stable Diffusion Nano](https://huggingface.co/bguisard/stable-diffusion-nano-2-1) "
        "allows for fast prototyping of diffusion models, enabling quick "
        "experimentation with easily available hardware. It performs "
        "reasonably well on several tasks, but it struggles with small "
        "details such as faces."
    ),
    css="h1 { text-align: center }",
    # Some examples were copied from hf.co/spaces/stabilityai/stable-diffusion
    examples=[
        ["A watercolor painting of a bird", 50, 0],
        ["A small cabin on top of a snowy mountain in the style of Disney, artstation", 50, 232190380],
        ["A mecha robot in a favela in expressionist style", 50, 827198341273],
        ["Marvel MCU deadpool, red mask, red shirt, red gloves, black shoulders, black elbow pads, black legs, gold buckle, black belt, black mask, white eyes, black boots, fuji low light color 35mm film, downtown Osaka alley at night out of focus in background, neon lights", 50, 23856839],
    ],
)

app.launch()