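"""Gradio demo for Stable Diffusion Nano.

Loads the Flax weights of bguisard/stable-diffusion-nano-2-1 and serves a
small text-to-image UI that generates 128x128 images with JAX.
"""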
import gradio as gr
import jax
import jax.numpy as jnp
from diffusers import FlaxPNDMScheduler, FlaxStableDiffusionPipeline
from flax.jax_utils import replicate
from flax.training.common_utils import shard

# Half precision keeps memory usage low; the scheduler is patched back to
# float32 below.
DTYPE = jnp.float16

# revision="flax" points at the Hub branch that stores the Flax weights.
pipeline, pipeline_params = FlaxStableDiffusionPipeline.from_pretrained(
    "bguisard/stable-diffusion-nano-2-1",
    revision="flax",
    dtype=DTYPE,
)
if DTYPE != jnp.float32:
    # There is a known issue with schedulers when loading from a pretrained
    # pipeline: the scheduler must always run in float32.
    # See: https://github.com/huggingface/diffusers/issues/2155
    scheduler, scheduler_params = FlaxPNDMScheduler.from_pretrained(
        pretrained_model_name_or_path="bguisard/stable-diffusion-nano-2-1",
        subfolder="scheduler",
        dtype=jnp.float32,
    )
    pipeline_params["scheduler"] = scheduler_params
    pipeline.scheduler = scheduler


def generate_image(prompt: str, inference_steps: int = 30, prng_seed: int = 0):
    # One RNG key per device keeps the pmapped sampler reproducible.
    rng = jax.random.PRNGKey(int(prng_seed))
    rng = jax.random.split(rng, jax.device_count())
    # Replicate the model parameters across all local devices.
    p_params = replicate(pipeline_params)

    # Generate one sample per device; `shard` requires the batch dimension to
    # be divisible by the device count.
    num_samples = jax.device_count()
    prompt_ids = pipeline.prepare_inputs([prompt] * num_samples)
    prompt_ids = shard(prompt_ids)

    images = pipeline(
        prompt_ids=prompt_ids,
        params=p_params,
        prng_seed=rng,
        height=128,
        width=128,
        num_inference_steps=int(inference_steps),
        jit=True,
    ).images

    # Merge the (devices, per-device batch) leading axes into one batch axis.
    images = images.reshape((num_samples,) + images.shape[-3:])
    images = pipeline.numpy_to_pil(images)
    return images[0]
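
# A sketch of calling the generator directly (without the web UI), assuming a
# working JAX install and enough device memory for the pipeline:
#
#   img = generate_image("A watercolor painting of a bird")
#   img.save("bird.png")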


prompt_input = gr.Textbox(
    label="Prompt", placeholder="A watercolor painting of a bird"
)
inf_steps_input = gr.Slider(
    minimum=1, maximum=100, value=30, step=1, label="Inference Steps"
)
seed_input = gr.Number(value=0, label="Seed")


app = gr.Interface(
    fn=generate_image,
    inputs=[prompt_input, inf_steps_input, seed_input],
    outputs="image",
    title="🤗 Stable Diffusion Nano 🧨",
    description=(
        "Based on stable diffusion and fine-tuned on 128x128 images, "
        "[Stable Diffusion Nano](https://huggingface.co/bguisard/stable-diffusion-nano-2-1) allows "
        "for fast prototyping of diffusion models, enabling quick experimentation "
        "with easily available hardware."
        " It performs reasonably well on several tasks, but it struggles "
        "with small details such as faces."
    ),
    css="h1 { text-align: center }",
    # Some examples were copied from hf.co/spaces/stabilityai/stable-diffusion
    examples=[
        ["A watercolor painting of a bird", 50, 0],
        ["A small cabin on top of a snowy mountain in the style of Disney, artstation", 50, 232190380],
        ["A mecha robot in a favela in expressionist style", 50, 827198341273],
        ["Marvel MCU deadpool, red mask, red shirt, red gloves, black shoulders, black elbow pads, black legs, gold buckle, black belt, black mask, white eyes, black boots, fuji low light color 35mm film, downtown Osaka alley at night out of focus in background, neon lights", 50, 23856839],
    ],
)

app.launch()
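
# When running outside Hugging Face Spaces, `app.launch(share=True)` also
# exposes a temporary public URL (a standard Gradio option).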