Manjushri committed on
Commit
d6f2c7d
1 Parent(s): a8501e9

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +12 -2
app.py CHANGED
from diffusers import DiffusionPipeline

# Prefer the GPU when one is visible to PyTorch; otherwise run on the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"


def _load_sdxl(repo_id):
    """Load a Diffusers pipeline from *repo_id* (safetensors weights) and move it to *device*."""
    pipeline = DiffusionPipeline.from_pretrained(repo_id, use_safetensors=True)
    return pipeline.to(device)


# SDXL base generates the image; the refiner polishes the base output.
pipe = _load_sdxl("stabilityai/stable-diffusion-xl-base-1.0")
refiner = _load_sdxl("stabilityai/stable-diffusion-xl-refiner-1.0")
import os

from diffusers import DiffusionPipeline

# Prefer the GPU when one is visible to PyTorch; otherwise run on the CPU.
device = "cuda" if torch.cuda.is_available() else "cpu"

if torch.cuda.is_available():
    # BUG FIX: the original wrote `PYTORCH_CUDA_ALLOC_CONF={'max_split_size_mb': 6000}`,
    # which only binds a throwaway Python dict to a local name. The CUDA caching
    # allocator reads the *environment variable* PYTORCH_CUDA_ALLOC_CONF (string
    # syntax "option:value"), and honors it only when set before CUDA is
    # initialized — so set it via os.environ, as early as possible.
    # NOTE(review): if torch.cuda was already initialized before this line runs,
    # the setting has no effect — confirm placement relative to first CUDA use.
    os.environ.setdefault("PYTORCH_CUDA_ALLOC_CONF", "max_split_size_mb:6000")
    # (The original also called torch.cuda.max_memory_allocated(device=device)
    # and discarded the result — a pure query, hence a no-op; removed.)
    torch.cuda.empty_cache()

    # fp16 weights + xformers attention keep VRAM usage low on GPU.
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",
        torch_dtype=torch.float16,
        variant="fp16",
        use_safetensors=True,
    )
    pipe.enable_xformers_memory_efficient_attention()
    pipe = pipe.to(device)
    torch.cuda.empty_cache()

    refiner = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0",
        use_safetensors=True,
        torch_dtype=torch.float16,
        variant="fp16",
    )
    refiner.enable_xformers_memory_efficient_attention()
    # Stream refiner weights between CPU and GPU on demand instead of .to(device),
    # trading speed for a much smaller peak VRAM footprint.
    refiner.enable_sequential_cpu_offload()
else:
    # CPU path: full-precision weights, no CUDA-specific tuning.
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0", use_safetensors=True
    )
    pipe = pipe.to(device)
    refiner = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-refiner-1.0", use_safetensors=True
    )
    refiner = refiner.to(device)
23