ChenoAi committed
Commit 1bbffff
1 Parent(s): 04584d9

Update app.py

Files changed (1)
  1. app.py +1 -67
app.py CHANGED
@@ -114,29 +114,7 @@ J9_pipe = StableDiffusionXLPipeline.from_pretrained(
 )
 J9_pipe.to("cuda")
 
-J8_pipe = StableDiffusionXLPipeline.from_pretrained(
-    "RunDiffusion/Juggernaut-XL-v8",
-    vae=vae,
-    torch_dtype=torch.float16,
-    custom_pipeline="lpw_stable_diffusion_xl",
-)
-J8_pipe.to("cuda")
 
-J7_pipe = StableDiffusionXLPipeline.from_pretrained(
-    "RunDiffusion/Juggernaut-XL-v7",
-    vae=vae,
-    torch_dtype=torch.float16,
-    custom_pipeline="lpw_stable_diffusion_xl",
-)
-J7_pipe.to("cuda")
-
-J_pipe = StableDiffusionXLPipeline.from_pretrained(
-    "RunDiffusion/Juggernaut-XL",
-    vae=vae,
-    torch_dtype=torch.float16,
-    custom_pipeline="lpw_stable_diffusion_xl",
-)
-J_pipe.to("cuda")
 
 @spaces.GPU
 def run_comparison(prompt: str,
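For reference, a minimal sketch of the single loader that survives this change, following the same pattern as the deleted J8/J7/J_pipe blocks. Only the opening and closing lines of the J9 loader appear in this hunk, so the v9 repo id and the fp16-fix VAE checkpoint below are assumptions; the real app.py defines vae and loads J9_pipe outside the lines shown here:

import torch
from diffusers import AutoencoderKL, StableDiffusionXLPipeline

# Assumed stand-in for the `vae` that app.py builds earlier (not in this hunk).
vae = AutoencoderKL.from_pretrained(
    "madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16
)

# Same loading pattern as the removed v8/v7/base pipelines, kept only for v9.
J9_pipe = StableDiffusionXLPipeline.from_pretrained(
    "RunDiffusion/Juggernaut-XL-v9",  # assumed repo id, by analogy with v8/v7
    vae=vae,
    torch_dtype=torch.float16,
    custom_pipeline="lpw_stable_diffusion_xl",  # long-prompt-weighting community pipeline
)
J9_pipe.to("cuda")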
@@ -193,41 +171,6 @@ def run_comparison(prompt: str,
                        ).images
     image_paths_r5 = [save_image(img) for img in image_r5]
 
-    image_r6 = J8_pipe(prompt=prompt,
-                       negative_prompt=negative_prompt,
-                       width=width,
-                       height=height,
-                       guidance_scale=guidance_scale,
-                       num_inference_steps=num_inference_steps,
-                       num_images_per_prompt=num_images_per_prompt,
-                       cross_attention_kwargs={"scale": 0.65},
-                       output_type="pil",
-                       ).images
-    image_paths_r6 = [save_image(img) for img in image_r6]
-
-    image_r7 = J7_pipe(prompt=prompt,
-                       negative_prompt=negative_prompt,
-                       width=width,
-                       height=height,
-                       guidance_scale=guidance_scale,
-                       num_inference_steps=num_inference_steps,
-                       num_images_per_prompt=num_images_per_prompt,
-                       cross_attention_kwargs={"scale": 0.65},
-                       output_type="pil",
-                       ).images
-    image_paths_r7 = [save_image(img) for img in image_r7]
-
-    image_r8 = J_pipe(prompt=prompt,
-                      negative_prompt=negative_prompt,
-                      width=width,
-                      height=height,
-                      guidance_scale=guidance_scale,
-                      num_inference_steps=num_inference_steps,
-                      num_images_per_prompt=num_images_per_prompt,
-                      cross_attention_kwargs={"scale": 0.65},
-                      output_type="pil",
-                      ).images
-    image_paths_r8 = [save_image(img) for img in image_r8]
 
     return image_paths_r3, image_paths_r4,image_paths_r5, seed
 
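The deleted r6/r7/r8 calls all shared one argument list, so the call that remains for J9_pipe inside run_comparison presumably reads like the fragment below. This is a sketch of a function-body fragment, not standalone code: J9_pipe is the pipeline loaded above, and save_image is the helper app.py already defines (its body is not part of this diff):

    # Remaining generation call, mirroring the argument list of the removed
    # image_r6/r7/r8 calls. In diffusers, cross_attention_kwargs={"scale": 0.65}
    # scales the effect of a loaded LoRA, if any.
    image_r5 = J9_pipe(prompt=prompt,
                       negative_prompt=negative_prompt,
                       width=width,
                       height=height,
                       guidance_scale=guidance_scale,
                       num_inference_steps=num_inference_steps,
                       num_images_per_prompt=num_images_per_prompt,
                       cross_attention_kwargs={"scale": 0.65},
                       output_type="pil",
                       ).images
    image_paths_r5 = [save_image(img) for img in image_r5]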
@@ -321,16 +264,7 @@ with gr.Blocks(theme=gr.themes.Base()) as demo:
         with gr.Column():
             image_r5 = gr.Gallery(label="Juggernaut-XL-9",columns=1, preview=True,)
             gr.Markdown("## [Juggernaut-XL-9](https://huggingface.co)")
-        with gr.Column():
-            image_r6 = gr.Gallery(label="Juggernaut-XL-8",columns=1, preview=True,)
-            gr.Markdown("## [Juggernaut-XL-8](https://huggingface.co)")
-        with gr.Column():
-            image_r7 = gr.Gallery(label="Juggernaut-XL-7",columns=1, preview=True,)
-            gr.Markdown("## [Juggernaut-XL-7](https://huggingface.co)")
-        with gr.Column():
-            image_r8 = gr.Gallery(label="Juggernaut-XL",columns=1, preview=True,)
-            gr.Markdown("## [Juggernaut-XL](https://huggingface.co)")
-    image_outputs = [image_r3, image_r4, image_r5, image_r6, image_r7, image_r8]
+    image_outputs = [image_r3, image_r4, image_r5]
     gr.on(
         triggers=[prompt.submit, run.click],
         fn=run_comparison,
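With the three extra galleries removed, run_comparison's four return values line up with the three remaining galleries plus the seed. The toy example below is a self-contained illustration of the gr.on wiring pattern this hunk keeps; it uses generic component names and a stub callback, not app.py's actual layout:

import gradio as gr

def fake_compare(prompt: str):
    # Stub standing in for run_comparison: one value per wired output
    # (three gallery lists plus a seed).
    return [], [], [], 0

with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    run = gr.Button("Run")
    with gr.Row():
        g1 = gr.Gallery(label="Model A", columns=1, preview=True)
        g2 = gr.Gallery(label="Model B", columns=1, preview=True)
        g3 = gr.Gallery(label="Juggernaut-XL-9", columns=1, preview=True)
    seed = gr.Number(label="Seed")
    image_outputs = [g1, g2, g3]
    # Both submitting the prompt and clicking Run trigger the same callback.
    gr.on(
        triggers=[prompt.submit, run.click],
        fn=fake_compare,
        inputs=[prompt],
        outputs=image_outputs + [seed],
    )

if __name__ == "__main__":
    demo.launch()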
 