ChenoAi committed
Commit 60f6770
1 Parent(s): cf4309e

Update app.py

Files changed (1):
  1. app.py +119 -105
app.py CHANGED
@@ -1,22 +1,32 @@
-#!/usr/bin/env python
-
-import os
-import random
-import uuid
-
 import gradio as gr
+from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDiscreteScheduler, LCMScheduler, AutoencoderKL, DiffusionPipeline
+import torch
 import numpy as np
-from PIL import Image
+from huggingface_hub import hf_hub_download
+from safetensors.torch import load_file
 import spaces
-import torch
-from diffusers import StableDiffusionXLPipeline, EulerAncestralDiscreteScheduler
-
-DESCRIPTION = """
-# Juggernaut X v10
-"""
+import os
+import random
+import uuid
 
 def save_image(img):
-    unique_name = str(uuid.uuid4()) + ".jpeg"
+    unique_name = str(uuid.uuid4()) + ".png"
     img.save(unique_name)
     return unique_name
 
@@ -26,34 +36,37 @@ def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
     return seed
 
 MAX_SEED = np.iinfo(np.int32).max
+vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
-if not torch.cuda.is_available():
-    DESCRIPTION += "\n<p>Running on CPU 🥶 This demo may not work on CPU.</p>"
-
-MAX_SEED = np.iinfo(np.int32).max
-
-USE_TORCH_COMPILE = 0
-ENABLE_CPU_OFFLOAD = 0
-
-if torch.cuda.is_available():
-    pipe = StableDiffusionXLPipeline.from_pretrained(
-        "RunDiffusion/Juggernaut-X-v10",
-        torch_dtype=torch.float16
-    )
-    #pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)
-
-    #pipe.load_lora_weights("ehristoforu/dalle-3-xl-v2", weight_name="dalle-3-xl-lora-v2.safetensors", adapter_name="dalle")
-    #pipe.set_adapters("dalle")
-
-    pipe.to("cuda")
-
-@spaces.GPU(enable_queue=True)
-def generate(
-    prompt: str,
+JX_pipe = StableDiffusionXLPipeline.from_pretrained(
+    "RunDiffusion/Juggernaut-X-Hyper",
+    vae=vae,
+    torch_dtype=torch.float16,
+)
+JX_pipe.to("cuda")
+
+J10_pipe = StableDiffusionXLPipeline.from_pretrained(
+    "RunDiffusion/Juggernaut-X-v10",
+    vae=vae,
+    torch_dtype=torch.float16,
+)
+J10_pipe.to("cuda")
+
+J9_pipe = StableDiffusionXLPipeline.from_pretrained(
+    "RunDiffusion/Juggernaut-X-v10",
+    vae=vae,
+    torch_dtype=torch.float16,
+    use_safetensors=True,
+    add_watermarker=False,
+    variant="fp16",
+)
+J9_pipe.to("cuda")
+
+@spaces.GPU
+def run_comparison(
+    prompt: str,
     negative_prompt: str = "",
     use_negative_prompt: bool = False,
     num_inference_steps: int = 30,
@@ -65,15 +78,22 @@ def generate(
     randomize_seed: bool = False,
     progress=gr.Progress(track_tqdm=True),
 ):
-
-
     seed = int(randomize_seed_fn(seed, randomize_seed))
-
     if not use_negative_prompt:
-        negative_prompt = ""  # type: ignore
+        negative_prompt = ""
 
-    images = pipe(
-        prompt=prompt,
+    image_r3 = JX_pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        num_images_per_prompt=num_images_per_prompt,
+        cross_attention_kwargs={"scale": 0.65},
+        output_type="pil",
+    ).images
+    image_paths_r3 = [save_image(img) for img in image_r3]
+
+    image_r4 = J10_pipe(
+        prompt=prompt,
         negative_prompt=negative_prompt,
         width=width,
         height=height,
@@ -83,42 +103,35 @@ def generate(
         cross_attention_kwargs={"scale": 0.65},
         output_type="pil",
     ).images
-    image_paths = [save_image(img) for img in images]
-    print(image_paths)
-    return image_paths, seed
-
-examples = [
-    "neon holography crystal cat",
-    "a cat eating a piece of cheese",
-    "an astronaut riding a horse in space",
-    "a cartoon of a boy playing with a tiger",
-    "a cute robot artist painting on an easel, concept art",
-    "a close up of a woman wearing a transparent, prismatic, elaborate nemeses headdress, over the should pose, brown skin-tone"
-]
-
-css = '''
-.gradio-container{max-width: 560px !important}
-h1{text-align:center}
-'''
-with gr.Blocks(css=css) as demo:
-    gr.Markdown(DESCRIPTION)
-    gr.DuplicateButton(
-        value="Duplicate Space for private use",
-        elem_id="duplicate-button",
-        visible=False,
-    )
-    with gr.Group():
-        with gr.Row():
-            prompt = gr.Text(
-                label="Prompt",
-                show_label=False,
-                max_lines=1,
-                placeholder="Enter your prompt",
-                container=False,
-            )
-            run_button = gr.Button("Run", scale=0)
-    result = gr.Gallery(label="Result", columns=1, preview=True, show_label=False)
+    image_paths_r4 = [save_image(img) for img in image_r4]
+
+    image_r5 = J9_pipe(
+        prompt=prompt,
+        negative_prompt=negative_prompt,
+        width=width,
+        height=height,
+        guidance_scale=guidance_scale,
+        num_inference_steps=num_inference_steps,
+        num_images_per_prompt=num_images_per_prompt,
+        cross_attention_kwargs={"scale": 0.65},
+        output_type="pil",
+    ).images
+    image_paths_r5 = [save_image(img) for img in image_r5]
+    return image_paths_r3, image_paths_r4, image_paths_r5, seed
+
+examples = [
+    "A dignified beaver wearing glasses, a vest, and colorful neck tie.",
+    "The spirit of a tamagotchi wandering in the city of Barcelona",
+    "an ornate, high-backed mahogany chair with a red cushion",
+    "a sketch of a camel next to a stream",
+    "a delicate porcelain teacup sits on a saucer, its surface adorned with intricate blue patterns",
+    "a baby swan grafitti",
+    "A bald eagle made of chocolate powder, mango, and whipped cream",
+]
+
+with gr.Blocks() as demo:
+    gr.Markdown("## One step SDXL comparison 🦶")
+    gr.Markdown("Compare SDXL variants and distillations able to generate images in a single diffusion step")
+    prompt = gr.Textbox(label="Prompt")
+    run = gr.Button("Run")
     with gr.Accordion("Advanced options", open=False):
         use_negative_prompt = gr.Checkbox(label="Use negative prompt", value=True)
         negative_prompt = gr.Text(
@@ -178,46 +191,47 @@ with gr.Blocks(css=css) as demo:
         value=6,
     )
 
-
-    gr.Examples(
-        examples=examples,
-        inputs=prompt,
-        outputs=[result, seed],
-        fn=generate,
-        cache_examples=False,
-    )
-
+    with gr.Row():
+        with gr.Column():
+            image_r3 = gr.Gallery(label="RealVisXL V3", columns=1, preview=True)
+            gr.Markdown("## [RealVisXL V3](https://huggingface.co)")
+        with gr.Column():
+            image_r4 = gr.Gallery(label="RealVisXL V4", columns=1, preview=True)
+            gr.Markdown("## [RealVisXL V4](https://huggingface.co)")
+        with gr.Column():
+            image_r5 = gr.Gallery(label="Playground v2.5", columns=1, preview=True)
+            gr.Markdown("## [Playground v2.5](https://huggingface.co)")
+    image_outputs = [image_r3, image_r4, image_r5]
+    gr.on(
+        triggers=[prompt.submit, run.click],
+        fn=run_comparison,
+        inputs=[
+            prompt,
+            negative_prompt,
+            use_negative_prompt,
+            num_inference_steps,
+            num_images_per_prompt,
+            seed,
+            width,
+            height,
+            guidance_scale,
+            randomize_seed,
+        ],
+        outputs=[*image_outputs, seed],
+    )
     use_negative_prompt.change(
        fn=lambda x: gr.update(visible=x),
        inputs=use_negative_prompt,
        outputs=negative_prompt,
        api_name=False,
     )
-
-    gr.on(
-        triggers=[
-            prompt.submit,
-            negative_prompt.submit,
-            run_button.click,
-        ],
-        fn=generate,
-        inputs=[
-            prompt,
-            negative_prompt,
-            use_negative_prompt,
-            num_inference_steps,
-            num_images_per_prompt,
-            seed,
-            width,
-            height,
-            guidance_scale,
-            randomize_seed,
-        ],
-        outputs=[result, seed],
-        api_name="run",
-    )
-
+    gr.Examples(
+        examples=examples,
+        fn=run_comparison,
+        inputs=prompt,
+        outputs=[*image_outputs, seed],
+        cache_examples=False,
+        run_on_click=True,
+    )
 if __name__ == "__main__":
     demo.queue(max_size=20).launch(show_api=False, debug=False)
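
Note: after this commit the script instantiates three full SDXL pipelines on the GPU at import time, roughly tripling the usual SDXL VRAM footprint. If that exceeds the available card memory, one workaround is to keep the checkpoints on the CPU and move only the active pipeline to CUDA per call. The sketch below is a minimal illustration of that pattern and is not part of the commit; the `pipes` registry and `run_one` helper are hypothetical names, and it assumes the same diffusers API used above.

import torch
from diffusers import StableDiffusionXLPipeline, AutoencoderKL

# Shared fp16-safe VAE, as in the commit.
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)

# Hypothetical registry: load each checkpoint once on the CPU and only
# move it to the GPU while it is actually generating.
pipes = {
    "JX": StableDiffusionXLPipeline.from_pretrained(
        "RunDiffusion/Juggernaut-X-Hyper", vae=vae, torch_dtype=torch.float16
    ),
    "J10": StableDiffusionXLPipeline.from_pretrained(
        "RunDiffusion/Juggernaut-X-v10", vae=vae, torch_dtype=torch.float16
    ),
}

def run_one(name: str, prompt: str, **kwargs):
    """Run one checkpoint at a time so peak VRAM stays near a single SDXL pipeline."""
    pipe = pipes[name].to("cuda")
    try:
        return pipe(prompt=prompt, **kwargs).images
    finally:
        pipe.to("cpu")            # free GPU memory for the next pipeline
        torch.cuda.empty_cache()

The trade-off is a device-transfer delay on every call, so the eager all-on-GPU loading in the commit is the faster choice when the hardware can hold all three pipelines at once.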