nielsgl commited on
Commit
5055c26
β€’
1 Parent(s): 5ea875e

copy app from nielsgl/dreambooth-pug-ace space

Browse files
Files changed (3) hide show
  1. README.md +7 -4
  2. app.py +209 -0
  3. requirements.txt +9 -0
README.md CHANGED
@@ -1,13 +1,16 @@
1
  ---
2
- title: Dreambooth Keras Pug Ace
3
- emoji: 🐨
4
- colorFrom: gray
5
- colorTo: red
6
  sdk: gradio
7
  sdk_version: 3.23.0
8
  app_file: app.py
9
  pinned: false
10
  license: creativeml-openrail-m
 
 
 
11
  ---
12
 
13
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
1
  ---
2
+ title: Dreambooth Pug Ace
3
+ emoji: 🐢
4
+ colorFrom: red
5
+ colorTo: indigo
6
  sdk: gradio
7
  sdk_version: 3.23.0
8
  app_file: app.py
9
  pinned: false
10
  license: creativeml-openrail-m
11
+ tags:
12
+ - keras-dreambooth
13
+ - nature
14
  ---
15
 
16
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
app.py ADDED
@@ -0,0 +1,209 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from huggingface_hub import from_pretrained_keras
from keras_cv import models
import gradio as gr

import torch
from tensorflow import keras

from diffusers import StableDiffusionPipeline

# Run Keras ops in float16 where supported to cut GPU memory and latency.
keras.mixed_precision.set_global_policy("mixed_float16")

# prepare model
resolution = 512
# sd_dreambooth_model = models.StableDiffusion(
#     img_width=resolution, img_height=resolution
# )
# db_diffusion_model = from_pretrained_keras("keras-dreambooth/dreambooth_diffusion_model")
# sd_dreambooth_model._diffusion_model = db_diffusion_model

# checkpoint of the converted Stable Diffusion from KerasCV
model_ckpt = "nielsgl/dreambooth-keras-pug-ace-sd2.1"
pipeline = StableDiffusionPipeline.from_pretrained(model_ckpt)
# Prefer the GPU but fall back to CPU instead of crashing at import time
# on hosts without CUDA (the original unconditionally called .to("cuda")).
pipeline.to("cuda" if torch.cuda.is_available() else "cpu")

# DreamBooth instance token and class label used throughout the demo prompts.
unique_id = "puggieace"
class_label = "dog"
prompt = f"A photo of {unique_id} {class_label} on the beach"
# Warm-up generation so kernels/weights are primed before the first user request.
image = pipeline(prompt, num_inference_steps=50).images[0]
28
+
29
# generate images
def infer(prompt, negative_prompt, guidance_scale=10, num_inference_steps=50):
    """Generate two images for *prompt* that pass the safety checker.

    Args:
        prompt: Positive text prompt.
        negative_prompt: Text to steer away from; an empty string disables it.
        guidance_scale: Classifier-free guidance strength.
        num_inference_steps: Number of denoising steps per image.

    Returns:
        A list of up to two PIL images. May contain fewer if the safety
        checker keeps rejecting outputs after the attempt cap is reached.
    """
    neg = negative_prompt if negative_prompt else None
    imgs = []
    # Cap pipeline calls so a prompt that always trips the NSFW filter cannot
    # spin forever (the original `while len(imgs) != 2` loop was unbounded).
    max_attempts = 5
    attempts = 0
    while len(imgs) < 2 and attempts < max_attempts:
        attempts += 1
        # Over-generate (5 per call) to raise the odds of 2 safe images per batch.
        next_prompt = pipeline(
            prompt,
            negative_prompt=neg,
            guidance_scale=guidance_scale,
            num_inference_steps=num_inference_steps,
            num_images_per_prompt=5,
        )
        for img, is_nsfw in zip(next_prompt.images, next_prompt.nsfw_content_detected):
            if not is_nsfw:
                imgs.append(img)
            if len(imgs) == 2:
                break

    return imgs
42
+
43
# Module-level gallery placeholder; the Blocks layout below creates its own
# `output` component, so this one is only used by the (commented-out) Interface path.
output = gr.Gallery(label="Outputs").style(grid=(1,2))

# customize interface
title = "KerasCV Stable Diffusion Demo on images of Ace."
description = "This is a dreambooth model fine-tuned on images of my pug named Ace. To try it, input the concept with `puggieace dog`."
# Each example row matches infer()'s signature:
# [prompt, negative_prompt, guidance_scale, num_inference_steps].
examples=[
    ["Portrait photo of puggieace dog on a beachtowel wearing sunglasses on the beach, sunset in background, golden hour", "", 12, 50],
    ["A photo of a cute puggieace dog getting a haircut in a barbershop, ultra realistic, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha", "", 12, 75],
    ["Portrait of puggieace dog as a Roman Emperor, city in background, ultra realistic, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha", "", 15, 75],
    ["Photo of cute puggieace dog as an astronaut, space and planet in background, ultra realistic, concept art, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. trending on artstation", "", 15, 75],
    ["Photo of cute puggieace dog as super hero, futuristic city in background, cinematic light, high dynamic range, insane intricate details, stunning cinema effects, aesthetic, masterpiece, trending on artstation, cartoon art", "", 12, 75],
]
55
+
56
# Base URLs for example images hosted in the two model repos (SD 1.4 and SD 2.1).
base_14 = "https://huggingface.co/nielsgl/dreambooth-pug-ace-sd1.4/resolve/main/"
base_21 = "https://huggingface.co/nielsgl/dreambooth-keras-pug-ace-sd2.1/resolve/main/"

# Markdown rendered above the demo controls (intro / model description).
model_card_1 = f"""
# KerasCV Stable Diffusion in Diffusers 🧨🤗

DreamBooth model for the `puggieace` concept trained by nielsgl on the [nielsgl/dreambooth-ace](https://huggingface.co/datasets/nielsgl/dreambooth-ace) dataset.
It can be used by modifying the `instance_prompt`: **a photo of puggieace**.

The examples are from 2 different Keras CV models (`StableDiffusion` and `StableDiffusionV2`, corresponding to Stable Diffusion V1.4 and V2.1, respectively) trained on the same dataset (`nielsgl/dreambooth-ace`).

## Description

The Stable Diffusion V2 pipeline contained in the corresponding repository (`nielsgl/dreambooth-keras-pug-ace-sd2.1`) was created using a modified version of [this Space](https://huggingface.co/spaces/sayakpaul/convert-kerascv-sd-diffusers) for StableDiffusionV2 from KerasCV. The purpose is to convert the KerasCV Stable Diffusion weights in a way that is compatible with [Diffusers](https://github.com/huggingface/diffusers). This allows users to fine-tune using KerasCV and use the fine-tuned weights in Diffusers taking advantage of its nifty features (like [schedulers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/schedulers), [fast attention](https://huggingface.co/docs/diffusers/optimization/fp16), etc.).
This model was created as part of the Keras DreamBooth Sprint 🔥. Visit the [organisation page](https://huggingface.co/keras-dreambooth) for instructions on how to take part!

## Demo

"""

# Markdown rendered below the demo: example galleries, usage snippets, and
# the training hyperparameter table. Interpolates base_14/base_21 image URLs.
model_card_2 = f"""
## Examples

### Stable Diffusion V1.4

> Portrait of puggieace dog as a Roman Emperor, city in background

![Portrait of puggieace dog as a Roman Emperor, city in background, ultra realistic, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_14}examples/emperor-1.4.jpeg)

> Photo of puggieace dog wearing sunglasses on the beach, sunset in background, golden hour

![Photo of puggieace dog wearing sunglasses on the beach, sunset in background, golden hour]({base_14}examples/beach-1.4.jpg)

> Photo of cute puggieace dog as an astronaut, planet and spaceship in background

![Photo of cute puggieace dog as an astronaut, planet and spaceship in background, ultra realistic, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. trending on artstation]({base_14}examples/astronaut-1.4.jpg)

### Stable Diffusion V2.1

> Portrait painting of a cute puggieace dog as a samurai

![Portrait painting of a cute puggieace dog as a samurai, ultra realistic, concept art, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_21}examples/samurai-2.1.jpg)

> Photo of cute puggieace dog as an astronaut, space and planet in background

![Photo of cute puggieace dog as an astronaut, space and planet in background, ultra realistic, concept art, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater, trending on artstation]({base_21}examples/astronaut-2.1.jpg)

> A photo of a cute puggieace dog getting a haircut in a barbershop

![A photo of a cute puggieace dog getting a haircut in a barbershop, ultra realistic, intricate details, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_21}examples/haircut-2.1.jpg)

> Portrait photo of puggieace dog in New York

![Portrait photo of puggieace dog in New York, city and skyscrapers in background, highly detailed, photorealistic, hdr, 4k]({base_21}examples/ny-2.1.jpg)

> Portrait of puggieace dog as a Roman Emperor, city in background

![Portrait of puggieace dog as a Roman Emperor, city in background, ultra realistic, intricate details, eerie, highly detailed, photorealistic, octane render, 8 k, unreal engine. art by artgerm and greg rutkowski and charlie bowater and magali villeneuve and alphonse mucha]({base_21}examples/emperor-2.1.jpg)


## Usage with Stable Diffusion V1.4

```python
from huggingface_hub import from_pretrained_keras
import keras_cv
import matplotlib.pyplot as plt


model = keras_cv.models.StableDiffusion(img_width=512, img_height=512, jit_compile=True)
model._diffusion_model = from_pretrained_keras("nielsgl/dreambooth-pug-ace")
model._text_encoder = from_pretrained_keras("nielsgl/dreambooth-pug-ace-text-encoder")

images = model.text_to_image("a photo of puggieace dog on the beach", batch_size=3)
plt.imshow(image[0])
```

## Usage with Stable Diffusion V2.1

```python
from diffusers import StableDiffusionPipeline

pipeline = StableDiffusionPipeline.from_pretrained('nielsgl/dreambooth-keras-pug-ace-sd2.1')
image = pipeline().images[0]
image
```

### Training hyperparameters

The following hyperparameters were used during training for Stable Diffusion v1.4:

| Hyperparameters | Value |
| :-- | :-- |
| name | RMSprop |
| weight_decay | None |
| clipnorm | None |
| global_clipnorm | None |
| clipvalue | None |
| use_ema | False |
| ema_momentum | 0.99 |
| ema_overwrite_frequency | 100 |
| jit_compile | True |
| is_legacy_optimizer | False |
| learning_rate | 0.0010000000474974513 |
| rho | 0.9 |
| momentum | 0.0 |
| epsilon | 1e-07 |
| centered | False |
| training_precision | float32 |
"""
165
+
166
# Build the Gradio UI: model card, prompt controls + gallery, cached examples,
# and the extended model card. (Dead commented-out Interface code removed.)
with gr.Blocks() as demo:
    with gr.Row():
        gr.Markdown(model_card_1)
    with gr.Row():
        # Left column: generation controls matching infer()'s signature.
        with gr.Column():
            prompt_pos = gr.Textbox(label="Positive Prompt", value="a photo of puggieace dog getting a haircut")
            prompt_neg = gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry")
            prompt_gs = gr.Number(label='Guidance scale', value=12)
            prompt_steps = gr.Slider(label="Inference Steps",value=50)
            prompt_btn = gr.Button("Generate")
        # Right column: two-wide gallery for the generated images.
        with gr.Column():
            output = gr.Gallery(label="Outputs").style(grid=(1,2))
    prompt_btn.click(infer, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=[output])
    with gr.Row():
        # cache_examples=True pre-computes the example outputs at startup.
        gr.Examples(examples, inputs=[prompt_pos, prompt_neg, prompt_gs, prompt_steps], outputs=output, fn=infer, cache_examples=True)
    with gr.Row():
        with gr.Column():
            gr.Markdown(model_card_2)
        with gr.Column():
            gr.Markdown(" ")

# Queue requests so long-running generations are serialized, then launch.
demo.queue().launch()
195
+
196
+
197
+
198
+ # with gr.Blocks() as card_interface:
199
+ # gr.Markdown(model_card)
200
+
201
+ # demo_interface = gr.Interface(infer, inputs=[gr.Textbox(label="Positive Prompt", value="a photo of puggieace dog getting a haircut"),
202
+ # gr.Textbox(label="Negative Prompt", value="bad anatomy, blurry"),
203
+ # # gr.Slider(label='Number of gen image', minimum=1, maximum=4, value=2, step=1),
204
+ # gr.Number(label='Guidance scale', value=12),
205
+ # gr.Slider(label="Inference Steps",value=50),
206
+ # ], outputs=[output], title=title, description=description, examples=examples).queue()
207
+
208
+
209
+ # gr.TabbedInterface([card_interface, demo_interface], ["Model Card", "Demo πŸ€—"]).launch()
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
1
+ --extra-index-url https://download.pytorch.org/whl/cu113
2
+ torch
3
+ keras-cv
4
+ tensorflow
5
+ huggingface-hub
6
+ diffusers
7
+ transformers
8
+ pycocotools
9
+ accelerate