Manjushri committed
Commit 4406e6c
Parent: 7ac19b5

Update app.py

Files changed (1): app.py (+2 -2)
app.py CHANGED
@@ -3,7 +3,7 @@ import torch
 import numpy as np
 import modin.pandas as pd
 from PIL import Image
-from diffusers import DiffusionPipeline, StableDiffusion3Pipeline
+from diffusers import DiffusionPipeline  # , StableDiffusion3Pipeline
 from huggingface_hub import hf_hub_download
 
 device = 'cuda' if torch.cuda.is_available() else 'cpu'
@@ -58,7 +58,7 @@ def genie (Model, Prompt, negative_prompt, height, width, scale, steps, seed, re
 
     torch.cuda.empty_cache()
    torch.cuda.max_memory_allocated(device=device)
-    sdxl = StableDiffusion3Pipeline.from_pretrained("stabilityai/stable-diffusion-3-medium-diffusers", torch_dtype=torch.float16)#variant="fp16", use_safetensors=True)
+    sdxl = DiffusionPipeline.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
     sdxl.enable_xformers_memory_efficient_attention()
     sdxl = sdxl.to(device)
     torch.cuda.empty_cache()
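
For reference, a minimal sketch of the loading path this commit switches to. Only the DiffusionPipeline import and the stabilityai/stable-diffusion-xl-base-1.0 from_pretrained call are taken from the diff; the device handling mirrors app.py, and the prompt and generation parameters below are illustrative assumptions, not part of the commit.

# Sketch only: standalone SDXL loading and generation mirroring the updated lines.
# Everything outside the from_pretrained call is assumed, not copied from app.py.
import torch
from diffusers import DiffusionPipeline

device = 'cuda' if torch.cuda.is_available() else 'cpu'

# fp16 safetensors weights, as in the new line of the diff.
# Realistically this needs a CUDA GPU; fp16 inference on CPU is not supported well.
sdxl = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0",
    torch_dtype=torch.float16,
    variant="fp16",
    use_safetensors=True,
)
sdxl = sdxl.to(device)

# Optional, used in app.py: memory-efficient attention (requires the xformers package).
# sdxl.enable_xformers_memory_efficient_attention()

# Illustrative generation call; prompt and parameters are placeholders.
image = sdxl(
    prompt="a watercolor painting of a lighthouse at dusk",
    negative_prompt="blurry, low quality",
    height=1024,
    width=1024,
    guidance_scale=7.0,
    num_inference_steps=25,
).images[0]
image.save("sdxl_output.png")

DiffusionPipeline.from_pretrained resolves the concrete pipeline class (StableDiffusionXLPipeline here) from the model repository's configuration, which is why the explicit StableDiffusion3Pipeline import is no longer needed after this change.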