multimodalart committed
Commit de56cd9
1 Parent(s): 428d2aa

Update app.py

Files changed (1):
  app.py  +4 -0
app.py CHANGED
@@ -3,8 +3,10 @@ from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel, EulerDisc
 import torch
 from huggingface_hub import hf_hub_download
 from safetensors.torch import load_file
+import gc
 import spaces
 
+
 vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
 
 ### SDXL Turbo ####
@@ -33,6 +35,7 @@ pipe_lightning = StableDiffusionXLPipeline.from_pretrained(base,
     variant="fp16"
 )#.to("cuda")
 del unet
+gc.collect()
 pipe_lightning.scheduler = EulerDiscreteScheduler.from_config(pipe_lightning.scheduler.config, timestep_spacing="trailing", prediction_type="sample")
 #pipe_lightning.to("cuda")
 
@@ -55,6 +58,7 @@ pipe_hyper = StableDiffusionXLPipeline.from_pretrained(base,
 pipe_hyper.scheduler = LCMScheduler.from_config(pipe_hyper.scheduler.config)
 #pipe_hyper.to("cuda")
 del unet
+gc.collect()
 
 @spaces.GPU
 def run_comparison(prompt):
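
What the commit changes: each distilled UNet is only needed long enough to hand it to its pipeline, so after the hand-off the script now drops the local `unet` name and forces a garbage-collection pass before the next checkpoint is loaded. Below is a minimal sketch of that pattern, not the Space's exact code: the base repo id and the Lightning checkpoint filename are assumed placeholders, and `base` is defined elsewhere in app.py.

import gc

import torch
from diffusers import StableDiffusionXLPipeline, UNet2DConditionModel
from huggingface_hub import hf_hub_download
from safetensors.torch import load_file

base = "stabilityai/stable-diffusion-xl-base-1.0"  # assumed; app.py defines `base` earlier

# Build a UNet from the base config and load distilled weights into it
# (placeholder checkpoint; the Space's actual repo/filename may differ).
unet = UNet2DConditionModel.from_config(base, subfolder="unet").to(torch.float16)
unet.load_state_dict(
    load_file(hf_hub_download("ByteDance/SDXL-Lightning", "sdxl_lightning_4step_unet.safetensors"))
)

pipe_lightning = StableDiffusionXLPipeline.from_pretrained(
    base, unet=unet, torch_dtype=torch.float16, variant="fp16"
)

# The pipeline keeps its own reference to the UNet, so `del unet` only drops
# the module-level name; the explicit gc.collect() sweeps up the garbage left
# over from loading before the next checkpoint is read.
del unet
gc.collect()

Since the pipeline still holds the UNet, the `del` alone does not release the weights; the point of the change appears to be reclaiming loader overhead between the Turbo, Lightning, and Hyper-SD pipelines so peak host memory stays lower while all of them are built up front.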