Update main.py
main.py CHANGED
@@ -1,7 +1,8 @@
 run_api = False
-
+is_ssd = False
+is_sdxl = True
+is_sdxl_turbo=False
 import os
-
 # Use GPU
 gpu_info = os.popen("nvidia-smi").read()
 if "failed" in gpu_info:
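The three new booleans select which backend the rest of the file configures, but nothing in the commit prevents two of them from being enabled at once (a later branch would then silently rebuild pipe). A minimal guard, hypothetical and not part of this commit, could sit right after the flags:

# Hypothetical guard (not in the commit): fail fast unless the flags
# select exactly one backend.
assert sum([is_ssd, is_sdxl, is_sdxl_turbo]) == 1, \
    "enable exactly one of is_ssd / is_sdxl / is_sdxl_turbo"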
@@ -47,8 +48,6 @@ import PIL
 import base64
 import io
 import torch
-from diffusers import UNet2DConditionModel, DiffusionPipeline, LCMScheduler
-
 # SDXL
 from diffusers import UNet2DConditionModel, DiffusionPipeline, LCMScheduler
 
@@ -67,51 +66,52 @@ SECRET_TOKEN = os.getenv("SECRET_TOKEN", "default_secret")
 if is_gpu:
     # Uncomment the following line if you want to enable CUDA launch blocking
     os.environ["CUDA_LAUNCH_BLOCKING"] = "1"
-    torch_dtype=torch.float16
-    variant="fp16"
 else:
     # Uncomment the following line if you want to use CPU instead of GPU
     device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
-    torch_dtype=torch.float32
-    variant="fp32"
-
-
 # Get the current directory
 current_dir = os.getcwd()
 model_path = os.path.join(current_dir)
-
 # Set the cache path
 cache_path = os.path.join(current_dir, "cache")
 
-
-
+def load_pipeline(use_cuda):
+    device = "cuda" if use_cuda and torch.cuda.is_available() else "cpu"
+    if device == "cuda":
+        torch.cuda.max_memory_allocated(device=device)
+        torch.cuda.empty_cache()
+        pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", torch_dtype=torch.float16, variant="fp16", use_safetensors=True)
+        pipe.enable_xformers_memory_efficient_attention()
+        pipe = pipe.to(device)
+        torch.cuda.empty_cache()
+    else:
+        pipe = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo", use_safetensors=True)
+        pipe = pipe.to(device)
+    return pipe
+
+if is_sdxl:
+    torch_dtype=torch.float16
+    variant="fp16"
 unet = UNet2DConditionModel.from_pretrained(
     "latent-consistency/lcm-sdxl",
     torch_dtype=torch_dtype,
     variant=variant,
     cache_dir=cache_path,
 )
-
-
-
-
-
-
-
-
-# cache_dir=cache_path,
-# )
-from diffusers import StableDiffusionPipeline
-pipe = StableDiffusionPipeline.from_pretrained(model_id, torch_dtype=torch.float32)
-
-
+model_id="stabilityai/stable-diffusion-xl-base-1.0"
+pipe = DiffusionPipeline.from_pretrained(
+    model_id=model_id,
+    unet=unet,
+    torch_dtype=torch_dtype,
+    variant=variant,
+    cache_dir=cache_path,
+)
 pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
 if torch.cuda.is_available():
     pipe.to("cuda")
-
+if is_ssd:
 # SSD-1B
 from diffusers import LCMScheduler, AutoPipelineForText2Image
-
 pipe = AutoPipelineForText2Image.from_pretrained(
     "segmind/SSD-1B",
     torch_dtype=torch.float16,
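Two details of the SDXL branch above are worth flagging: DiffusionPipeline.from_pretrained takes the model identifier as its first positional argument (pretrained_model_name_or_path), so the model_id=model_id keyword would raise a TypeError, and only the two dtype assignments sit under if is_sdxl:, so the SDXL weights load regardless of the flag. A runnable sketch of what the branch appears to intend — the positional argument and the indentation are my reconstruction, not the commit's, and cache_path is assumed to be defined earlier in the file:

import torch
from diffusers import UNet2DConditionModel, DiffusionPipeline, LCMScheduler

if is_sdxl:
    torch_dtype = torch.float16
    variant = "fp16"
    # LCM-distilled UNet swapped into the SDXL base pipeline
    unet = UNet2DConditionModel.from_pretrained(
        "latent-consistency/lcm-sdxl",
        torch_dtype=torch_dtype,
        variant=variant,
        cache_dir=cache_path,
    )
    pipe = DiffusionPipeline.from_pretrained(
        "stabilityai/stable-diffusion-xl-base-1.0",  # positional, not model_id=
        unet=unet,
        torch_dtype=torch_dtype,
        variant=variant,
        cache_dir=cache_path,
    )
    pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
    if torch.cuda.is_available():
        pipe.to("cuda")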
@@ -121,11 +121,13 @@ else:
 pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
 if torch.cuda.is_available():
     pipe.to("cuda")
-
 # load and fuse
 pipe.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
 pipe.fuse_lora()
 
+if is_sdxl_turbo:
+    use_cuda=is_gpu
+    pipe = load_pipeline(use_cuda)
 
 def generate(
     prompt: str,
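The diff cuts off inside the generate signature. For context, an LCM-scheduled SDXL pipeline is typically driven with very few inference steps; a minimal, hypothetical call, with the prompt and the step/guidance values taken from the latent-consistency/lcm-sdxl model card rather than from this Space:

# Hypothetical usage of the pipe configured above; values follow the
# lcm-sdxl model card, not this commit.
prompt = "a close-up picture of an old man standing in the rain"
image = pipe(prompt, num_inference_steps=4, guidance_scale=8.0).images[0]
image.save("output.png")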