Update app.py
app.py CHANGED
@@ -409,6 +409,7 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
 
     print("Loaded state dict:", loaded_state_dict)
     print("Last LoRA:", last_lora, "| Current LoRA:", repo_name)
+    print("LoRA scale:", lora_scale, "Type:", type(lora_scale))
 
     # Prepare control images and scales based on face detection
     if face_detected:
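The new debug line prints both the value and the type of lora_scale, which suggests the scale was arriving from the UI with an unexpected type (note the later float(lora_scale) cast). A small defensive sketch of how the value could be normalized before use; coerce_scale is a hypothetical helper, not part of this commit:

def coerce_scale(value, default=1.0):
    # Gradio components can return str or numpy scalars; normalize to float.
    try:
        return float(value)
    except (TypeError, ValueError):
        print(f"Warning: invalid LoRA scale {value!r}, falling back to {default}")
        return default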
@@ -437,14 +438,43 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
     # Improved LoRA loading and caching
     if last_lora != repo_name:
         if last_fused:
-
-
-
+            try:
+                # Unload previous LoRA adapter
+                pipe.delete_adapters(["style_lora"])
+                print("Unloaded previous LoRA adapter")
+            except Exception as e:
+                print(f"Warning: Could not unload previous adapter: {e}")
+                # Try alternative method
+                try:
+                    pipe.unload_lora_weights()
+                    print("Unloaded LoRA weights via alternative method")
+                except:
+                    pass
+
+            try:
+                pipe.unload_textual_inversion()
+                print("Unloaded textual inversion")
+            except Exception as e:
+                print(f"Warning: Could not unload textual inversion: {e}")
 
         # Load LoRA with better error handling
         try:
-
-
+            # For diffusers >= 0.27, load_lora_weights expects different parameters
+            if full_path_lora.endswith('.safetensors') or full_path_lora.endswith('.bin'):
+                # Single file loading
+                import os
+                lora_dir = os.path.dirname(full_path_lora)
+                lora_file = os.path.basename(full_path_lora)
+                print(f"Loading LoRA from: {lora_dir}/{lora_file}")
+                pipe.load_lora_weights(lora_dir, weight_name=lora_file, adapter_name="style_lora")
+            else:
+                # Directory loading
+                print(f"Loading LoRA from directory: {full_path_lora}")
+                pipe.load_lora_weights(full_path_lora, adapter_name="style_lora")
+
+            # Set adapter scale instead of fusing
+            pipe.set_adapters(["style_lora"], adapter_weights=[float(lora_scale)])
+            print(f"LoRA loaded and adapter set with scale: {lora_scale}")
             last_fused = True
 
         # Handle pivotal tuning embeddings
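This hunk replaces the old fuse/unfuse flow with diffusers' named-adapter API: the previous adapter is deleted (falling back to unload_lora_weights), textual inversions are unloaded, and the new LoRA is loaded under the adapter name "style_lora" with its strength applied via set_adapters rather than baked in by fusing. A minimal standalone sketch of that pattern, assuming a recent diffusers release with PEFT installed; the model ID and LoRA repo below are illustrative placeholders, not values from this Space:

import torch
from diffusers import StableDiffusionXLPipeline

pipe = StableDiffusionXLPipeline.from_pretrained(
    "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
).to("cuda")

# Load a LoRA as a named adapter instead of fusing it into the weights.
pipe.load_lora_weights(
    "some-user/some-sdxl-lora",  # placeholder repo id or local directory
    weight_name="pytorch_lora_weights.safetensors",
    adapter_name="style_lora",
)

# The strength lives on the adapter, so changing it needs no re-fuse.
pipe.set_adapters(["style_lora"], adapter_weights=[0.8])

# Swapping styles later only requires dropping the adapter and reloading.
pipe.delete_adapters(["style_lora"])

Because nothing is fused, swapping LoRAs avoids the numerical drift that repeated fuse/unfuse cycles can introduce in fp16 weights, which matches the commit's comment "Set adapter scale instead of fusing".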
@@ -466,8 +496,13 @@ def generate_image(prompt, negative, face_emb, face_image, face_kps, image_stren
                     tokenizer=pipe.tokenizer_2
                 )
         except Exception as e:
+            import traceback
+            full_error = traceback.format_exc()
             print(f"Error loading LoRA: {e}")
-
+            print(f"Full traceback:\n{full_error}")
+            print(f"Full path attempted: {full_path_lora}")
+            print(f"LoRA scale attempted: {lora_scale} (type: {type(lora_scale)})")
+            raise gr.Error(f"Failed to load LoRA: {str(e)}\n\nPath: {full_path_lora}\nScale: {lora_scale}")
 
     print("Processing prompt...")
     conditioning, pooled = compel(prompt)
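The final hunk turns silent failures into visible ones: the full traceback plus the attempted path and scale are logged server-side, and the exception is re-raised as gr.Error so the message reaches the Space UI. A compact sketch of the same pattern, assuming gradio is available; load_style_lora is a hypothetical stand-in for the loading block above:

import traceback
import gradio as gr

def safe_load(pipe, full_path_lora, lora_scale):
    try:
        load_style_lora(pipe, full_path_lora, lora_scale)  # hypothetical loader
    except Exception as e:
        # Log everything server-side, then surface a readable error in the UI.
        print(f"Error loading LoRA: {e}")
        print(f"Full traceback:\n{traceback.format_exc()}")
        raise gr.Error(f"Failed to load LoRA: {e}\n\nPath: {full_path_lora}")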