Update app.py
app.py CHANGED
@@ -416,24 +416,98 @@ def generate_video(
     tiling_config = TilingConfig.default()
     video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
 
-    # >>> […rest of removed line truncated in this view…]
+    # >>> RUNTIME LoRA application (robust, multi-fallback)
+    # We cannot rely on mutating the original descriptor (some implementations are immutable),
+    # so create a fresh runtime descriptor and try multiple ways to install it.
+    global pipeline  # reassigned by the rebuild fallback below; must precede any use of the name
+    runtime_strength = float(lora_strength)
+    replaced = False
+
+    # 1) Try the simple approach: build a new LoraPathStrengthAndSDOps
+    runtime_lora = LoraPathStrengthAndSDOps(lora_path, runtime_strength, LTXV_LORA_COMFY_RENAMING_MAP)
+    print(f"[LoRA] attempting to apply runtime LoRA (strength={runtime_strength})")
+
+    # Try a few likely places to replace the descriptor used by the pipeline/ledger.
     try:
-        #
-        […7 removed lines truncated in this view…]
+        # common attribute on the pipeline
+        if hasattr(pipeline, "loras"):
+            try:
+                pipeline.loras = [runtime_lora]
+                replaced = True
+                print("[LoRA] replaced pipeline.loras")
+            except Exception as e:
+                print(f"[LoRA] pipeline.loras assignment failed: {e}")
+    except Exception:
+        pass
+
     try:
-        #
-        if hasattr(pipeline.model_ledger, "[…truncated…]
-        […5 removed lines truncated in this view…]
+        # common attribute on the model ledger
+        if hasattr(pipeline, "model_ledger") and hasattr(pipeline.model_ledger, "loras"):
+            try:
+                pipeline.model_ledger.loras = [runtime_lora]
+                replaced = True
+                print("[LoRA] replaced pipeline.model_ledger.loras")
+            except Exception as e:
+                print(f"[LoRA] pipeline.model_ledger.loras assignment failed: {e}")
+    except Exception:
+        pass
+
+    try:
+        # some internals use a private _loras list
+        if hasattr(pipeline, "model_ledger") and hasattr(pipeline.model_ledger, "_loras"):
+            try:
+                pipeline.model_ledger._loras = [runtime_lora]
+                replaced = True
+                print("[LoRA] replaced pipeline.model_ledger._loras")
+            except Exception as e:
+                print(f"[LoRA] pipeline.model_ledger._loras assignment failed: {e}")
+    except Exception:
+        pass
+
+    # 2) If we succeeded in replacing the descriptor in place, clear the transformer cache so it will rebuild
+    if replaced:
+        try:
+            if hasattr(pipeline.model_ledger, "_transformer"):
+                pipeline.model_ledger._transformer = None
+            # also clear potential caches named similarly to 'transformer_cache' if present
+            if hasattr(pipeline.model_ledger, "transformer_cache"):
+                try:
+                    pipeline.model_ledger.transformer_cache = {}
+                except Exception:
+                    pass
+            print("[LoRA] in-place descriptor replacement done; transformer cache cleared")
+        except Exception as e:
+            print(f"[LoRA] replacement succeeded but cache clearing failed: {e}")
+
+    # 3) FINAL FALLBACK - if none of the in-place replacements worked, rebuild the pipeline
+    if not replaced:
+        print("[LoRA] in-place replacement FAILED; rebuilding pipeline with runtime LoRA (this is slow)")
+        try:
+            # Rebuild the pipeline object with the new LoRA descriptor.
+            # NOTE: this reassigns the global `pipeline`; the required `global` declaration is made above.
+            pipeline = LTX23DistilledA2VPipeline(
+                distilled_checkpoint_path=checkpoint_path,
+                spatial_upsampler_path=spatial_upsampler_path,
+                gemma_root=gemma_root,
+                loras=[runtime_lora],
+                quantization=QuantizationPolicy.fp8_cast(),
+            )
+
+            # After rebuilding, we *do not* re-run the original module-level preloads here,
+            # because re-pinning may be complex; the rebuilt pipeline will construct its
+            # own ledger as part of the first call. This is slower but reliable.
+            # Clear any transformer caches if they exist on the new ledger as well.
+            try:
+                if hasattr(pipeline.model_ledger, "_transformer"):
+                    pipeline.model_ledger._transformer = None
+            except Exception:
+                pass
+
+            print("[LoRA] pipeline rebuilt with runtime LoRA")
+        except Exception as e:
+            print(f"[LoRA] pipeline rebuild FAILED: {e}")
+
+    # Finally, log memory then proceed
     log_memory("before pipeline call")
 
     video, audio = pipeline(
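
The three in-place fallbacks above share one shape: if the attribute exists, try to assign the new descriptor list and log the outcome. As an illustrative sketch only (not part of this commit; `pipeline` and `runtime_lora` stand in for the objects used in app.py, and `try_install_loras` is a hypothetical helper name), they could be collapsed into a single function:

    def try_install_loras(target, attr, loras):
        """Assign `loras` to `target.<attr>` if the attribute exists.

        Returns True on success, False if the attribute is missing or the
        assignment raises (e.g. frozen dataclasses or read-only properties).
        """
        if target is None or not hasattr(target, attr):
            return False
        try:
            setattr(target, attr, loras)
            print(f"[LoRA] replaced {type(target).__name__}.{attr}")
            return True
        except Exception as e:
            print(f"[LoRA] {attr} assignment failed: {e}")
            return False

    # Attempt every location, as the commit does, rather than stopping
    # at the first success.
    ledger = getattr(pipeline, "model_ledger", None)
    replaced = try_install_loras(pipeline, "loras", [runtime_lora])
    replaced = try_install_loras(ledger, "loras", [runtime_lora]) or replaced
    replaced = try_install_loras(ledger, "_loras", [runtime_lora]) or replaced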
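
For reference, a minimal standalone example of the `global` rule the hunk has to respect: Python rejects a `global` declaration that appears after the name has already been used in the same function, which is why the declaration sits at the top of the added block rather than next to the rebuild call:

    pipeline = "original"

    def rebuild():
        # Must come before any read or write of `pipeline` in this function;
        # declaring it after the print() below raises
        # "SyntaxError: name 'pipeline' is used prior to global declaration".
        global pipeline
        print(pipeline)        # reads the module-level value
        pipeline = "rebuilt"   # rebinds the module-level name

    rebuild()
    print(pipeline)  # -> rebuilt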