Wauplin (HF staff) committed
Commit 9a59460
Parent: c34c3cf

Update app.py

Files changed (1):
  1. app.py (+17, -25)
app.py CHANGED
@@ -12,7 +12,7 @@ from previewer.modules import Previewer

 from gallery_history import fetch_gallery_history, show_gallery_history

-os.environ["TOKENIZERS_PARALLELISM"] = "false"
+os.environ["TOKENIZERS_PARALLELISM"] = 'false'

 DESCRIPTION = "# Würstchen"
 DESCRIPTION += "\n<p style=\"text-align: center\"><a href='https://huggingface.co/warp-ai/wuerstchen' target='_blank'>Würstchen</a> is a new fast and efficient high resolution text-to-image architecture and model</p>"
@@ -29,12 +29,8 @@ PREVIEW_IMAGES = True
 dtype = torch.float16
 device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
 if torch.cuda.is_available():
-    prior_pipeline = WuerstchenPriorPipeline.from_pretrained(
-        "warp-ai/wuerstchen-prior", torch_dtype=dtype
-    )
-    decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained(
-        "warp-ai/wuerstchen", torch_dtype=dtype
-    )
+    prior_pipeline = WuerstchenPriorPipeline.from_pretrained("warp-ai/wuerstchen-prior", torch_dtype=dtype)
+    decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained("warp-ai/wuerstchen", torch_dtype=dtype)
     if ENABLE_CPU_OFFLOAD:
         prior_pipeline.enable_model_cpu_offload()
         decoder_pipeline.enable_model_cpu_offload()
@@ -43,12 +39,8 @@ if torch.cuda.is_available():
         decoder_pipeline.to(device)

     if USE_TORCH_COMPILE:
-        prior_pipeline.prior = torch.compile(
-            prior_pipeline.prior, mode="reduce-overhead", fullgraph=True
-        )
-        decoder_pipeline.decoder = torch.compile(
-            decoder_pipeline.decoder, mode="reduce-overhead", fullgraph=True
-        )
+        prior_pipeline.prior = torch.compile(prior_pipeline.prior, mode="reduce-overhead", fullgraph=True)
+        decoder_pipeline.decoder = torch.compile(decoder_pipeline.decoder, mode="reduce-overhead", fullgraph=True)

     if PREVIEW_IMAGES:
         previewer = Previewer()
@@ -228,18 +220,18 @@ with gr.Blocks(css="style.css") as demo:
     history = show_gallery_history()

     inputs = [
-        prompt,
-        negative_prompt,
-        seed,
-        width,
-        height,
-        prior_num_inference_steps,
-        # prior_timesteps,
-        prior_guidance_scale,
-        decoder_num_inference_steps,
-        # decoder_timesteps,
-        decoder_guidance_scale,
-        num_images_per_prompt,
+        prompt,
+        negative_prompt,
+        seed,
+        width,
+        height,
+        prior_num_inference_steps,
+        # prior_timesteps,
+        prior_guidance_scale,
+        decoder_num_inference_steps,
+        # decoder_timesteps,
+        decoder_guidance_scale,
+        num_images_per_prompt,
     ]
     prompt.submit(
         fn=randomize_seed_fn,
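For context on the hunks above that collapse the multi-line calls: Würstchen in diffusers is a two-stage system, where the prior pipeline maps the prompt to image embeddings and the decoder pipeline renders pixels from them. A minimal sketch of that chain, following the diffusers Würstchen documentation (the prompt and parameter values are illustrative, not taken from app.py):

```python
import torch
from diffusers import WuerstchenDecoderPipeline, WuerstchenPriorPipeline

dtype = torch.float16

# Same checkpoints the app loads, now on one line each.
prior_pipeline = WuerstchenPriorPipeline.from_pretrained(
    "warp-ai/wuerstchen-prior", torch_dtype=dtype
).to("cuda")
decoder_pipeline = WuerstchenDecoderPipeline.from_pretrained(
    "warp-ai/wuerstchen", torch_dtype=dtype
).to("cuda")

prompt = "an astronaut riding a horse, detailed, 8k"  # illustrative

# Stage 1: the prior turns text into image embeddings.
prior_output = prior_pipeline(
    prompt=prompt,
    height=1024,
    width=1024,
    guidance_scale=4.0,
)

# Stage 2: the decoder turns those embeddings into an image.
images = decoder_pipeline(
    image_embeddings=prior_output.image_embeddings,
    prompt=prompt,
    guidance_scale=0.0,
    output_type="pil",
).images
images[0].save("wuerstchen.png")
```

Note that the reformatted `torch.compile(..., mode="reduce-overhead", fullgraph=True)` calls wrap only the denoising submodules (`prior_pipeline.prior`, `decoder_pipeline.decoder`), where the per-step compute lives, while `enable_model_cpu_offload()` (backed by accelerate) trades latency for VRAM by keeping submodules on CPU until they run.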
 
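The last hunk appears to be whitespace-only: the removed and added lines of the `inputs` list read identically once leading indentation is stripped. The wiring around that list follows the common Gradio pattern of re-seeding before generating. A sketch of that pattern under assumed definitions (`randomize_seed_fn` and `generate` are reconstructed conventionally here, not copied from app.py, and the real `inputs` list carries all the controls shown in the hunk):

```python
import random

import gradio as gr

MAX_SEED = 2**32 - 1  # assumed bound; the app may use a different constant


def randomize_seed_fn(seed: int, randomize_seed: bool) -> int:
    # Conventional helper in HF demo Spaces: draw a fresh seed on request.
    return random.randint(0, MAX_SEED) if randomize_seed else seed


def generate(prompt: str, seed: int) -> list:
    return []  # stand-in for the real two-stage Würstchen call


with gr.Blocks() as demo:
    prompt = gr.Textbox(label="Prompt")
    randomize_seed = gr.Checkbox(label="Randomize seed", value=True)
    seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1)
    result = gr.Gallery(label="Result")

    inputs = [prompt, seed]  # abridged; see the hunk for the full list

    # Chain: update the seed first, then generate with the new value.
    prompt.submit(
        fn=randomize_seed_fn,
        inputs=[seed, randomize_seed],
        outputs=seed,
    ).then(
        fn=generate,
        inputs=inputs,
        outputs=result,
    )
```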