Spaces: Running on Zero
Update app.py
app.py CHANGED
@@ -128,6 +128,7 @@ def _build_pipeline_cpu() -> DiffusionPipeline:
     """
     log.info(f"Loading model backend: {MODEL_BACKEND}")
     if MODEL_BACKEND == "sdxl_lcm_unet":
+        # SDXL base with LCM UNet (no LoRA required)
         unet = UNet2DConditionModel.from_pretrained(
             "latent-consistency/lcm-sdxl",
             torch_dtype=torch.float32,
@@ -140,12 +141,17 @@ def _build_pipeline_cpu() -> DiffusionPipeline:
             cache_dir=CACHE_DIR,
         )
     elif MODEL_BACKEND == "ssd1b_lcm_lora":
+        # SSD-1B with LCM-LoRA (Diffusers backend; no PEFT required)
         _p = AutoPipelineForText2Image.from_pretrained(
             "segmind/SSD-1B",
             torch_dtype=torch.float32,
             cache_dir=CACHE_DIR,
         )
-        _p.load_lora_weights("latent-consistency/lcm-lora-ssd-1b")
+        _p.load_lora_weights(
+            "latent-consistency/lcm-lora-ssd-1b",
+            adapter_name="lcm",
+            use_peft_backend=False,  # <-- avoid PEFT requirement
+        )
         _p.fuse_lora()
     else:
         # Default: SDXL + LCM-LoRA (smaller download, great speed/quality)
@@ -154,7 +160,11 @@ def _build_pipeline_cpu() -> DiffusionPipeline:
             torch_dtype=torch.float32,
             cache_dir=CACHE_DIR,
         )
-        _p.load_lora_weights("latent-consistency/lcm-lora-sdxl")
+        _p.load_lora_weights(
+            "latent-consistency/lcm-lora-sdxl",
+            adapter_name="lcm",
+            use_peft_backend=False,  # <-- avoid PEFT requirement
+        )
         _p.fuse_lora()

     _p.scheduler = LCMScheduler.from_config(_p.scheduler.config)
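For orientation, here is a minimal usage sketch of the pipeline this commit configures. It assumes the Space's own _build_pipeline_cpu() from app.py above; the prompt, step count, and guidance value are illustrative defaults for LCM-style pipelines, not values taken from the Space.

# Hypothetical usage sketch, assuming it runs in the same app.py module on CPU.
pipe = _build_pipeline_cpu()
result = pipe(
    prompt="a watercolor fox in a misty forest",  # illustrative prompt (assumption)
    num_inference_steps=4,   # LCM backends typically need only ~2-8 steps
    guidance_scale=1.0,      # LCM-LoRA is usually run with low guidance (~1.0-2.0)
)
result.images[0].save("sample.png")

The LCMScheduler.from_config(...) swap at the end of the diff is what makes these few-step, low-guidance settings viable for all three backends.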