artificialguybr committed on
Commit 920f544
Parent: 9efde7c

Create app.py

Files changed (1): app.py (+362, -0)
app.py ADDED
@@ -0,0 +1,362 @@
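# Gradio demo for Animagine XL 3.0: loads an SDXL pipeline with the fp16-fix VAE
# once at startup and serves a text-to-image UI with an optional latent-upscale second pass.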
import os
import gc
import gradio as gr
import numpy as np
import torch
import json
import spaces
import config
import utils
import logging
from PIL import Image, PngImagePlugin
from datetime import datetime
from diffusers.models import AutoencoderKL
from diffusers import StableDiffusionXLPipeline, StableDiffusionXLImg2ImgPipeline
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

DESCRIPTION = "Animagine XL 3.0"
if not torch.cuda.is_available():
    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo does not work on CPU. </p>"
IS_COLAB = utils.is_google_colab() or os.getenv("IS_COLAB") == "1"
HF_TOKEN = os.getenv("HF_TOKEN")
CACHE_EXAMPLES = torch.cuda.is_available() and os.getenv("CACHE_EXAMPLES") == "1"
MIN_IMAGE_SIZE = int(os.getenv("MIN_IMAGE_SIZE", "512"))
MAX_IMAGE_SIZE = int(os.getenv("MAX_IMAGE_SIZE", "2048"))
USE_TORCH_COMPILE = os.getenv("USE_TORCH_COMPILE") == "1"
ENABLE_CPU_OFFLOAD = os.getenv("ENABLE_CPU_OFFLOAD") == "1"
OUTPUT_DIR = os.getenv("OUTPUT_DIR", "./outputs")
MODEL = os.getenv(
    "MODEL",
    "https://huggingface.co/cagliostrolab/animagine-xl-3.0/blob/main/animagine-xl-3.0.safetensors",
)

torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
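
# Build the SDXL pipeline with the fp16-fix VAE; MODEL paths ending in .safetensors
# go through from_single_file, everything else through from_pretrained.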
def load_pipeline(model_name):
    vae = AutoencoderKL.from_pretrained(
        "madebyollin/sdxl-vae-fp16-fix",
        torch_dtype=torch.float16,
    )
    pipeline = (
        StableDiffusionXLPipeline.from_single_file
        if MODEL.endswith(".safetensors")
        else StableDiffusionXLPipeline.from_pretrained
    )

    pipe = pipeline(
        model_name,
        vae=vae,
        torch_dtype=torch.float16,
        custom_pipeline="lpw_stable_diffusion_xl",
        use_safetensors=True,
        add_watermarker=False,
        use_auth_token=HF_TOKEN,
        variant="fp16",
    )

    pipe.to(device)
    return pipe
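
# Main generation entry point: resolves the output resolution from the aspect-ratio
# selector, swaps in the requested scheduler, and runs either a single text-to-image
# pass or a latent upscale followed by an img2img refinement pass.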
@spaces.GPU
def generate(
    prompt: str,
    negative_prompt: str = "",
    seed: int = 0,
    custom_width: int = 1024,
    custom_height: int = 1024,
    guidance_scale: float = 7.0,
    num_inference_steps: int = 28,
    sampler: str = "Euler a",
    aspect_ratio_selector: str = "896 x 1152",
    use_upscaler: bool = False,
    upscaler_strength: float = 0.55,
    upscale_by: float = 1.5,
    progress=gr.Progress(track_tqdm=True),
) -> Image:
    generator = utils.seed_everything(seed)

    width, height = utils.aspect_ratio_handler(
        aspect_ratio_selector,
        custom_width,
        custom_height,
    )

    width, height = utils.preprocess_image_dimensions(width, height)

    backup_scheduler = pipe.scheduler
    pipe.scheduler = utils.get_scheduler(pipe.scheduler.config, sampler)
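
    # Share the already-loaded components with an img2img pipeline for the optional
    # upscale pass, and collect the generation parameters for logging and saved metadata.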
    if use_upscaler:
        upscaler_pipe = StableDiffusionXLImg2ImgPipeline(**pipe.components)
    metadata = {
        "prompt": prompt,
        "negative_prompt": negative_prompt,
        "resolution": f"{width} x {height}",
        "guidance_scale": guidance_scale,
        "num_inference_steps": num_inference_steps,
        "seed": seed,
        "sampler": sampler,
    }

    if use_upscaler:
        new_width = int(width * upscale_by)
        new_height = int(height * upscale_by)
        metadata["use_upscaler"] = {
            "upscale_method": "nearest-exact",
            "upscaler_strength": upscaler_strength,
            "upscale_by": upscale_by,
            "new_resolution": f"{new_width} x {new_height}",
        }
    else:
        metadata["use_upscaler"] = None
    logger.info(json.dumps(metadata, indent=4))
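
    # With the upscaler enabled, generate latents first, resize them with nearest-exact
    # interpolation, then refine with img2img; otherwise decode straight to PIL images.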
    try:
        if use_upscaler:
            latents = pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                width=width,
                height=height,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                generator=generator,
                output_type="latent",
            ).images
            upscaled_latents = utils.upscale(latents, "nearest-exact", upscale_by)
            images = upscaler_pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                image=upscaled_latents,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                strength=upscaler_strength,
                generator=generator,
                output_type="pil",
            ).images
        else:
            images = pipe(
                prompt=prompt,
                negative_prompt=negative_prompt,
                width=width,
                height=height,
                guidance_scale=guidance_scale,
                num_inference_steps=num_inference_steps,
                generator=generator,
                output_type="pil",
            ).images

        if images and IS_COLAB:
            for image in images:
                filepath = utils.save_image(image, metadata, OUTPUT_DIR)
                logger.info(f"Image saved as {filepath} with metadata")

        return images, metadata
    except Exception as e:
        logger.exception(f"An error occurred: {e}")
        raise
    finally:
        if use_upscaler:
            del upscaler_pipe
        pipe.scheduler = backup_scheduler
        utils.free_memory()
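
# Load the pipeline once at startup; without CUDA the UI still builds,
# but pipe stays None (the demo does not work on CPU).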
if torch.cuda.is_available():
    pipe = load_pipeline(MODEL)
    logger.info("Loaded on Device!")
else:
    pipe = None
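
# Gradio UI: prompt box and result gallery, plus an "Advanced Settings" accordion
# with resolution, upscaler, sampler, seed, and sampling controls.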
with gr.Blocks(css="style.css") as demo:
    title = gr.HTML(
        f"""<h1><span>{DESCRIPTION}</span></h1>""",
        elem_id="title",
    )
    gr.Markdown(
        f"""Gradio demo for [cagliostrolab/animagine-xl-3.0](https://huggingface.co/cagliostrolab/animagine-xl-3.0)""",
        elem_id="subtitle",
    )
    gr.DuplicateButton(
        value="Duplicate Space for private use",
        elem_id="duplicate-button",
        visible=os.getenv("SHOW_DUPLICATE_BUTTON") == "1",
    )
    with gr.Group():
        with gr.Row():
            prompt = gr.Text(
                label="Prompt",
                show_label=False,
                max_lines=5,
                placeholder="Enter your prompt",
                container=False,
            )
            run_button = gr.Button(
                "Generate",
                variant="primary",
                scale=0
            )
        result = gr.Gallery(
            label="Result",
            columns=1,
            preview=True,
            show_label=False
        )
    with gr.Accordion(label="Advanced Settings", open=False):
        negative_prompt = gr.Text(
            label="Negative Prompt",
            max_lines=5,
            placeholder="Enter a negative prompt",
        )
        aspect_ratio_selector = gr.Radio(
            label="Aspect Ratio",
            choices=config.aspect_ratios,
            value="896 x 1152",
            container=True,
        )
        with gr.Group(visible=False) as custom_resolution:
            with gr.Row():
                custom_width = gr.Slider(
                    label="Width",
                    minimum=MIN_IMAGE_SIZE,
                    maximum=MAX_IMAGE_SIZE,
                    step=8,
                    value=1024,
                )
                custom_height = gr.Slider(
                    label="Height",
                    minimum=MIN_IMAGE_SIZE,
                    maximum=MAX_IMAGE_SIZE,
                    step=8,
                    value=1024,
                )
        use_upscaler = gr.Checkbox(label="Use Upscaler", value=False)
        with gr.Row() as upscaler_row:
            upscaler_strength = gr.Slider(
                label="Strength",
                minimum=0,
                maximum=1,
                step=0.05,
                value=0.55,
                visible=False,
            )
            upscale_by = gr.Slider(
                label="Upscale by",
                minimum=1,
                maximum=1.5,
                step=0.1,
                value=1.5,
                visible=False,
            )

        sampler = gr.Dropdown(
            label="Sampler",
            choices=config.sampler_list,
            interactive=True,
            value="Euler a",
        )
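
        # Seed and sampling controls; the "Generation Parameters" accordion displays
        # the metadata JSON returned by generate().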
        with gr.Row():
            seed = gr.Slider(
                label="Seed", minimum=0, maximum=utils.MAX_SEED, step=1, value=0
            )
            randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
        with gr.Group():
            with gr.Row():
                guidance_scale = gr.Slider(
                    label="Guidance scale",
                    minimum=1,
                    maximum=12,
                    step=0.1,
                    value=7.0,
                )
                num_inference_steps = gr.Slider(
                    label="Number of inference steps",
                    minimum=1,
                    maximum=50,
                    step=1,
                    value=28,
                )
        with gr.Accordion(label="Generation Parameters", open=False):
            gr_metadata = gr.JSON(label="Metadata", show_label=False)
    gr.Examples(
        examples=config.examples,
        inputs=prompt,
        outputs=[result, gr_metadata],
        fn=lambda *args, **kwargs: generate(*args, use_upscaler=True, **kwargs),
        cache_examples=CACHE_EXAMPLES,
    )
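
    # Wire up visibility toggles: the upscaler sliders follow the checkbox, and the
    # custom-resolution group appears only for the "Custom" aspect ratio.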
    use_upscaler.change(
        fn=lambda x: [gr.update(visible=x), gr.update(visible=x)],
        inputs=use_upscaler,
        outputs=[upscaler_strength, upscale_by],
        queue=False,
        api_name=False,
    )
    aspect_ratio_selector.change(
        fn=lambda x: gr.update(visible=x == "Custom"),
        inputs=aspect_ratio_selector,
        outputs=custom_resolution,
        queue=False,
        api_name=False,
    )
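
    # Positional inputs for generate(); the order must match the function signature.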
    inputs = [
        prompt,
        negative_prompt,
        seed,
        custom_width,
        custom_height,
        guidance_scale,
        num_inference_steps,
        sampler,
        aspect_ratio_selector,
        use_upscaler,
        upscaler_strength,
        upscale_by,
    ]
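
    # Every trigger first runs randomize_seed_fn (honoring the checkbox), then generate();
    # only the prompt-submit chain is exposed under the "run" API name.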
    prompt.submit(
        fn=utils.randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name="run",
    )
    negative_prompt.submit(
        fn=utils.randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
        api_name=False,
    )
    run_button.click(
        fn=utils.randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
        queue=False,
        api_name=False,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=[result, gr_metadata],
        api_name=False,
    )
demo.queue(max_size=20).launch(debug=IS_COLAB, share=IS_COLAB)