ehristoforu committed on
Commit
77893cf
1 Parent(s): 9b56275

Create app.py

Files changed (1)
  1. app.py +275 -0
app.py ADDED
@@ -0,0 +1,275 @@
+ #!/usr/bin/env python
+
+ import os
+ import random
+ import uuid
+
+ import gradio as gr
+ import numpy as np
+ from PIL import Image
+ import spaces
+ import torch
+ from diffusers import StableDiffusionPipeline, StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
+
+ DESCRIPTION = """
+ # [Fluently Playground](https://huggingface.co/fluently)
+
+ """
+ if not torch.cuda.is_available():
+     DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
+
+ MAX_SEED = np.iinfo(np.int32).max
+
+ USE_TORCH_COMPILE = 0
+ ENABLE_CPU_OFFLOAD = 0
+
+ device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+
+
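+ # Load all four Fluently checkpoints up front so the demo can switch models
+ # without reloading weights between requests.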
+ if torch.cuda.is_available():
+     pipe_3_5 = StableDiffusionPipeline.from_pretrained(
+         "fluently/Fluently-v3.5",
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+     )
+     pipe_3_5.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_3_5.scheduler.config)
+     pipe_3_5.to(device)
+
+     pipe_anime = StableDiffusionPipeline.from_pretrained(
+         "fluently/Fluently-anime",
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+     )
+     pipe_anime.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_anime.scheduler.config)
+     pipe_anime.to(device)
+
+     pipe_epic = StableDiffusionPipeline.from_pretrained(
+         "fluently/Fluently-epic",
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+     )
+     pipe_epic.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_epic.scheduler.config)
+     pipe_epic.to(device)
+
+     pipe_xl = StableDiffusionXLPipeline.from_pretrained(
+         "fluently/Fluently-XL-v1",
+         torch_dtype=torch.float16,
+         use_safetensors=True,
+     )
+     pipe_xl.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe_xl.scheduler.config)
+     pipe_xl.to(device)
+
+     print("Loaded on Device!")
+
+     if USE_TORCH_COMPILE:
+         # Optionally compile each UNet for faster inference.
+         for p in (pipe_3_5, pipe_anime, pipe_epic, pipe_xl):
+             p.unet = torch.compile(p.unet, mode="reduce-overhead", fullgraph=True)
+         print("Models Compiled!")
+
+
+ def save_image(img):
+     unique_name = str(uuid.uuid4()) + ".png"
+     img.save(unique_name)
+     return unique_name
+
+
+ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
+     if randomize_seed:
+         seed = random.randint(0, MAX_SEED)
+     return seed
+
+
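+ # Text-to-image entry point; `@spaces.GPU` requests a GPU for each call on Spaces.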
+ @spaces.GPU(enable_queue=True)
+ def generate(
+     prompt: str,
+     negative_prompt: str = "",
+     use_negative_prompt: bool = False,
+     seed: int = 0,
+     width: int = 1024,
+     height: int = 1024,
+     guidance_scale: float = 3,
+     randomize_seed: bool = False,
+     model: str = "Fluently v3.5",
+     progress=gr.Progress(track_tqdm=True),
+ ):
+     seed = int(randomize_seed_fn(seed, randomize_seed))
+     # Seed a generator so a fixed seed reproduces the same image.
+     generator = torch.Generator(device=device).manual_seed(seed)
+
+     if not use_negative_prompt:
+         negative_prompt = ""  # type: ignore
+
+     if model == "Fluently v3.5":
+         images = pipe_3_5(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             width=width,
+             height=height,
+             guidance_scale=guidance_scale,
+             num_inference_steps=30,
+             num_images_per_prompt=1,
+             generator=generator,
+             output_type="pil",
+         ).images
+     elif model == "Fluently Anime":
+         images = pipe_anime(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             width=width,
+             height=height,
+             guidance_scale=guidance_scale,
+             num_inference_steps=30,
+             num_images_per_prompt=1,
+             generator=generator,
+             output_type="pil",
+         ).images
+     elif model == "Fluently Epic":
+         images = pipe_epic(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             width=width,
+             height=height,
+             guidance_scale=guidance_scale,
+             num_inference_steps=30,
+             num_images_per_prompt=1,
+             generator=generator,
+             output_type="pil",
+         ).images
+     else:
+         images = pipe_xl(
+             prompt=prompt,
+             negative_prompt=negative_prompt,
+             width=width,
+             height=height,
+             guidance_scale=guidance_scale,
+             num_inference_steps=25,
+             num_images_per_prompt=1,
+             generator=generator,
+             output_type="pil",
+         ).images
+
+     image_paths = [save_image(img) for img in images]
+     print(image_paths)
+     return image_paths, seed
+
+
+ examples = [
+     "neon holography crystal cat",
+     "a cat eating a piece of cheese",
+     "an astronaut riding a horse in space",
+     "a cartoon of a boy playing with a tiger",
+     "a cute robot artist painting on an easel, concept art",
+     "a close up of a woman wearing a transparent, prismatic, elaborate nemes headdress, over the shoulder pose, brown skin-tone",
+ ]
+
+ css = '''
+ .gradio-container{max-width: 560px !important}
+ h1{text-align:center}
+ footer {
+     visibility: hidden
+ }
+ '''
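+ # Assemble the Gradio UI: model picker, prompt row, result gallery, advanced options.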
+ with gr.Blocks(title="Fluently Playground", css=css) as demo:
+     gr.Markdown(DESCRIPTION)
+     gr.DuplicateButton(
+         value="Duplicate Space for private use",
+         elem_id="duplicate-button",
+         visible=False,
+     )
+
+     with gr.Row():
+         model = gr.Radio(
+             label="Model",
+             choices=["Fluently XL v1", "Fluently v3.5", "Fluently Anime", "Fluently Epic"],
+             value="Fluently v3.5",
+             interactive=True,
+         )
+     with gr.Group():
+         with gr.Row():
+             prompt = gr.Text(
+                 label="Prompt",
+                 show_label=False,
+                 max_lines=1,
+                 placeholder="Enter your prompt",
+                 container=False,
+             )
+             run_button = gr.Button("Run", scale=0)
+         result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
+     with gr.Accordion("Advanced options", open=False):
+         use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=False)
+         negative_prompt = gr.Text(
+             label="Negative prompt",
+             max_lines=3,
+             value="""(deformed, distorted, disfigured:1.3), poorly drawn, bad anatomy, wrong anatomy, extra limb, missing limb, floating limbs, (mutated hands and fingers:1.4), disconnected limbs, mutation, mutated, ugly, disgusting, blurry, amputation""",
+             placeholder="Enter a negative prompt",
+             visible=False,
+         )
+         seed = gr.Slider(
+             label="Seed",
+             minimum=0,
+             maximum=MAX_SEED,
+             step=1,
+             value=0,
+             visible=True,
+         )
+         randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
+         with gr.Row(visible=True):
+             width = gr.Slider(
+                 label="Width",
+                 minimum=256,
+                 maximum=1024,
+                 step=8,
+                 value=512,
+             )
+             height = gr.Slider(
+                 label="Height",
+                 minimum=256,
+                 maximum=1024,
+                 step=8,
+                 value=512,
+             )
+         with gr.Row():
+             guidance_scale = gr.Slider(
+                 label="Guidance Scale",
+                 minimum=0.1,
+                 maximum=20.0,
+                 step=0.1,
+                 value=5.5,
+             )
+
+     gr.Examples(
+         examples=examples,
+         inputs=prompt,
+         outputs=[result, seed],
+         fn=generate,
+         cache_examples=False,
+     )
+
+     use_negative_prompt.change(
+         fn=lambda x: gr.update(visible=x),
+         inputs=use_negative_prompt,
+         outputs=negative_prompt,
+         api_name=False,
+     )
+
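+     # Run generation when the prompt is submitted or the Run button is clicked.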
+     gr.on(
+         triggers=[
+             prompt.submit,
+             negative_prompt.submit,
+             run_button.click,
+         ],
+         fn=generate,
+         inputs=[
+             prompt,
+             negative_prompt,
+             use_negative_prompt,
+             seed,
+             width,
+             height,
+             guidance_scale,
+             randomize_seed,
+             model,
+         ],
+         outputs=[result, seed],
+         api_name="run",
+     )
+
+ if __name__ == "__main__":
+     demo.queue(max_size=20).launch(show_api=False, debug=False)