File size: 4,992 Bytes
8a8373d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
21b1026
8a8373d
 
 
 
 
 
 
 
 
 
723e7fd
8a8373d
 
 
 
723e7fd
8a8373d
 
 
 
 
 
 
 
 
 
21b1026
 
 
8a8373d
 
 
 
 
 
 
 
6c6eefc
8a8373d
 
6c6eefc
 
8a8373d
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
2d788bc
8a8373d
 
 
 
 
 
 
 
e45a310
8a8373d
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
from huggingface_hub import from_pretrained_keras
from keras_cv import models
import gradio as gr

from tensorflow import keras

from diffusers import StableDiffusionPipeline

keras.mixed_precision.set_global_policy("mixed_float16")

# prepare model
# NOTE(review): `resolution` is not referenced anywhere in this file — the
# pipeline's output size comes from the checkpoint itself. Kept for clarity.
resolution = 512

# checkpoint of the converted Stable Diffusion from KerasCV
model_ckpt = "nielsgl/dreambooth-bored-ape"
pipeline = StableDiffusionPipeline.from_pretrained(model_ckpt)
pipeline = pipeline.to("cuda")

# DreamBooth instance token and class label the checkpoint was fine-tuned on.
unique_id = "drawbayc"
class_label = "monkey"
# One module-level generation whose result is never used afterwards —
# presumably a warm-up so the first user request avoids cold-start latency
# (CUDA init, weight paging). TODO confirm; otherwise it just slows startup
# by ~50 denoising steps.
prompt = f"A drawing of {unique_id} {class_label} as a cowboy"
image = pipeline(prompt, num_inference_steps=50).images[0]

# generate images
def infer(prompt, negative_prompt, guidance_scale=10, num_inference_steps=50):
    """Generate up to 4 images for *prompt* that pass the NSFW safety check.

    Parameters
    ----------
    prompt : str
        Positive text prompt.
    negative_prompt : str
        Negative prompt; an empty string disables it.
    guidance_scale : float, optional
        Classifier-free guidance scale (default 10).
    num_inference_steps : int, optional
        Number of denoising steps (default 50).

    Returns
    -------
    list
        Up to 4 PIL images that were not flagged by the safety checker.
    """
    neg = negative_prompt if negative_prompt else None
    # Gradio Number/Slider components deliver floats; diffusers requires an
    # integer step count.
    num_inference_steps = int(num_inference_steps)
    imgs = []
    # Over-generate (5 per batch) and keep only unflagged images. Cap the
    # number of batches so a prompt whose outputs are always NSFW-flagged
    # cannot loop forever (the original `while len(imgs) != 4` could hang).
    max_batches = 5
    for _ in range(max_batches):
        result = pipeline(
            prompt,
            negative_prompt=neg,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            num_images_per_prompt=5,
        )
        # Some pipeline configurations report None instead of a flag list.
        flags = result.nsfw_content_detected or [False] * len(result.images)
        for img, flagged in zip(result.images, flags):
            if not flagged:
                imgs.append(img)
            if len(imgs) == 4:
                return imgs
    return imgs
    
# customize interface
# NOTE: the original code created an `output = gr.Gallery(...)` here; it was
# dead code, unconditionally shadowed by the Gallery created inside the
# gr.Blocks layout before any use, so it has been removed.
# NOTE(review): `title` and `description` are not referenced in this file
# (gr.Blocks does not take them) — kept in case an Interface variant uses them.
title = "KerasCV Stable Diffusion Demo on images of Bored Apes."
description = "This is a dreambooth model fine-tuned on images the NFT collection of the Bored Ape Yacht Club. To try it, input the concept with `drawbayc ape`."
# Each example row fills [positive prompt, negative prompt, guidance, steps].
examples = [
    ["A drawing of a drawbayc ape dressed as a cowboy", "bad anatomy, blurry, ugly, deformed, disfigured", 12, 75],
    ["A drawing of a drawbayc ape dressed as a clown", "bad anatomy, blurry, ugly, deformed, disfigured", 12, 75],
    ["A drawing of a drawbayc ape dressed as a turtle", "bad anatomy, blurry, ugly, deformed, disfigured", 12, 75],
]

# Base URL for example images referenced from the markdown model card below.
base_14 = "https://huggingface.co/nielsgl/dreambooth-bored-ape/resolve/main/"

# Top-of-page model card. Plain string — the original used an f-string with
# no placeholders (needless `f` prefix, ruff F541).
model_card_1 = """
# KerasCV Stable Diffusion in Diffusers 🧨🤗

DreamBooth model for the `drawbayc ape` concept trained by nielsgl on the [bayc-tiny](https://huggingface.co/datasets/nielsgl/bayc-tiny) dataset, images from this [Kaggle dataset](https://www.kaggle.com/datasets/stanleyjzheng/bored-apes-yacht-club).
It can be used by modifying the `instance_prompt`: **a drawing of drawbayc ape**

The model for this space can be found [here](https://huggingface.co/nielsgl/dreambooth-bored-ape).

## Description

The Stable Diffusion V2 pipeline contained in the corresponding repository (`nielsgl/dreambooth-bored-ape`) was created using a modified version of [this Space](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) for StableDiffusionV2 from KerasCV. The purpose is to convert the KerasCV Stable Diffusion weights in a way that is compatible with [Diffusers](https://github.com/huggingface/diffusers). This allows users to fine-tune using KerasCV and use the fine-tuned weights in Diffusers taking advantage of its nifty features (like [schedulers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers), [fast attention](https://huggingface.co/docs/diffusers/optimization/fp16), etc.).
This model was created as part of the Keras DreamBooth Sprint 🔥. Visit the [organisation page](https://huggingface.co/keras-dreambooth) for instructions on how to take part!

## Demo

"""

# Bottom-of-page model card: example generations plus a usage snippet.
# Uses `base_14` to resolve the example image URLs.
model_card_2 = f"""
## Examples


> A drawing of drawbayc monkey dressed as an astronaut

![a drawing of drawbayc monkey dressed as an astronaut]({base_14}examples/astronaut.jpg)

> A drawing of drawbayc monkey dressed as the pope

![a drawing of drawbayc monkey dressed as the pope]({base_14}examples/pope.jpg)


## Usage with Stable Diffusion V2.1

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained('nielsgl/dreambooth-bored-ape')
image = pipeline().images[0]
image
```

"""

# Assemble the Gradio UI: model card on top, prompt controls on the left with
# the output gallery on the right, cached examples below, then the second
# model card at the bottom.
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown(model_card_1)
    with gr.Row():
        with gr.Column():
            # Prompt controls, pre-filled with sensible defaults.
            prompt_pos = gr.Textbox(label="Positive Prompt", value="a drawing of drawbayc ape as an astronaut")
            prompt_neg = gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry, ugly, deformed, disfigured")

            prompt_gs = gr.Number(label='Guidance scale', value=12)
            prompt_steps = gr.Slider(label="Inference Steps",value=50)
            prompt_btn = gr.Button("Generate")
        with gr.Column():
            # 1x2 grid gallery that receives the list of images from `infer`.
            output = gr.Gallery(label="Outputs").style(grid=(1,2))
    # Wire the button: the four inputs map onto infer's four parameters.
    prompt_btn.click(infer, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=[output])
    with gr.Row():
        # cache_examples=True pre-computes outputs for each example row at
        # startup, so clicking an example is instant for users.
        gr.Examples(examples, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=output, fn=infer, cache_examples=True)
    with gr.Row():
        with gr.Column():
            gr.Markdown(model_card_2)
        with gr.Column():
            # Empty column keeps the markdown at half width.
            gr.Markdown(" ")

# queue() serializes long-running generation requests so they don't time out.
demo.queue().launch()