Manjushri committed on
Commit
c73951b
1 Parent(s): 8ec67ee

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +3 -3
app.py CHANGED
@@ -255,13 +255,13 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
255
  return image
256
 
257
  if Model == "SDXL 1.0":
258
- from diffusers import StableCascadeCombinedPipeline
259
 
260
- sdxl = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16)
261
 
262
  torch.cuda.empty_cache()
263
  torch.cuda.max_memory_allocated(device=device)
264
- #sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
265
  sdxl.enable_xformers_memory_efficient_attention()
266
  sdxl = sdxl.to(device)
267
  torch.cuda.empty_cache()
 
255
  return image
256
 
257
  if Model == "SDXL 1.0":
258
+ #from diffusers import StableCascadeCombinedPipeline
259
 
260
+ #sdxl = StableCascadeCombinedPipeline.from_pretrained("stabilityai/stable-cascade", variant="bf16", torch_dtype=torch.bfloat16)
261
 
262
  torch.cuda.empty_cache()
263
  torch.cuda.max_memory_allocated(device=device)
264
+ sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
265
  sdxl.enable_xformers_memory_efficient_attention()
266
  sdxl = sdxl.to(device)
267
  torch.cuda.empty_cache()