# =============================================================================
# Installation and Setup
# =============================================================================
import os
import subprocess
import sys

# These must be set before torch is imported so torch.compile/dynamo never
# activates in this Space.
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)

LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")
LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
    subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)

print("Installing ltx-core and ltx-pipelines from cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
     os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
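# The editable installs above make the cloned packages importable; the
# sys.path inserts are a belt-and-braces fallback so the repo's src/ trees
# shadow any previously installed ltx_core/ltx_pipelines versions.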
# =============================================================================
# Imports
# =============================================================================
import logging
import random
import shutil
import tempfile
from pathlib import Path
import gc
import hashlib

import torch

torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download, snapshot_download
from safetensors.torch import load_file, save_file

from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
from ltx_core.model.audio_vae import decode_audio as vae_decode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.quantization import QuantizationPolicy
from ltx_core.loader import LoraPathStrengthAndSDOps, LTXV_LORA_COMFY_RENAMING_MAP
from ltx_core.components.guiders import MultiModalGuider, MultiModalGuiderParams
from ltx_core.components.noisers import GaussianNoiser
from ltx_core.components.diffusion_steps import Res2sDiffusionStep
from ltx_core.types import VideoPixelShape
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import (
    DISTILLED_SIGMA_VALUES,
    LTX_2_3_HQ_PARAMS,
    STAGE_2_DISTILLED_SIGMA_VALUES,
)
from ltx_pipelines.utils.media_io import encode_video
from ltx_pipelines.utils.helpers import (
    assert_resolution,
    cleanup_memory,
    combined_image_conditionings,
    encode_prompts,
    multi_modal_guider_denoising_func,
    simple_denoising_func,
    denoise_audio_video,
)
from ltx_pipelines.utils import res2s_audio_video_denoising_loop

# Patch xformers
try:
    from ltx_core.model.transformer import attention as _attn_mod
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print("[ATTN] xformers patch applied")
except Exception as e:
    print(f"[ATTN] xformers patch failed: {e}")
logging.getLogger().setLevel(logging.INFO)

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "A majestic eagle soaring over mountain peaks at sunset, "
    "wings spread wide against the orange sky, feathers catching the light, "
    "wind currents visible in the motion blur, cinematic slow motion, 4K quality"
)
DEFAULT_NEGATIVE_PROMPT = (
    "worst quality, inconsistent motion, blurry, jittery, distorted, "
    "deformed, artifacts, text, watermark, logo, frame, border, "
    "low resolution, pixelated, unnatural, fake, CGI, cartoon"
)
DEFAULT_FRAME_RATE = 24.0
MIN_DIM, MAX_DIM, STEP = 256, 1280, 64
MIN_FRAMES, MAX_FRAMES = 9, 721

# Resolution presets with high/low tiers
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
}
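# These tiers give the stage-2 (final) output size; stage 1 of the pipeline
# runs at half of each dimension before the 2x spatial upsampler.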
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"

# =============================================================================
# Custom HQ Pipeline with LoRA Cache Support
# =============================================================================
class HQPipelineWithCachedLoRA:
    """
    Custom HQ pipeline that:
    1. Creates ONE ModelLedger WITHOUT LoRAs
    2. Handles ALL LoRAs via cached state (distilled + 12 custom)
    3. Supports CFG/negative prompts and guidance parameters
    4. Reuses a single transformer for both stages
    5. Uses 8 steps at half resolution + 3 steps at full resolution
    """
    def __init__(
        self,
        checkpoint_path: str,
        spatial_upsampler_path: str,
        gemma_root: str,
        quantization: QuantizationPolicy | None = None,
    ):
        from ltx_pipelines.utils import ModelLedger
        from ltx_pipelines.utils.types import PipelineComponents

        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.dtype = torch.bfloat16

        print(" Creating ModelLedger (no LoRAs)...")
        self.model_ledger = ModelLedger(
            dtype=self.dtype,
            device=self.device,
            checkpoint_path=checkpoint_path,
            gemma_root_path=gemma_root,
            spatial_upsampler_path=spatial_upsampler_path,
            loras=(),
            quantization=quantization,
        )
        self.pipeline_components = PipelineComponents(
            dtype=self.dtype,
            device=self.device,
        )
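        # Note: the ledger is built with loras=() on purpose. LoRA weights are
        # fused offline in prepare_lora_cache() and swapped in later via
        # load_state_dict, so the base checkpoint is only read once.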
        self._cached_state = None

    def apply_cached_lora_state(self, state_dict):
        """Store a pre-fused LoRA state dict; it is loaded into the live
        transformer later by apply_prepared_lora_state_to_pipeline()."""
        self._cached_state = state_dict
    def __call__(  # noqa: PLR0913
        self,
        prompt: str,
        negative_prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        video_guider_params: MultiModalGuiderParams,
        audio_guider_params: MultiModalGuiderParams,
        images: list,
        tiling_config: TilingConfig | None = None,
    ):
        # All helpers used below (guiders, noisers, VAE decoders, denoising
        # loops) are already imported at module level.
        assert_resolution(height=height, width=width, is_two_stage=True)

        device = self.device
        dtype = self.dtype
        generator = torch.Generator(device=device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)

        # NO LoRA application here - done in apply_prepared_lora_state_to_pipeline()
        ctx_p, ctx_n = encode_prompts(
            [prompt, negative_prompt],
            self.model_ledger,
        )
        v_context_p, a_context_p = ctx_p.video_encoding, ctx_p.audio_encoding
        v_context_n, a_context_n = ctx_n.video_encoding, ctx_n.audio_encoding
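        # Both prompts are encoded once, up front: the negative contexts feed
        # the CFG guiders in stage 1, while stage 2 (simple denoising, no
        # guider) uses only the positive contexts.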
        # ===================== STAGE 1: 8 steps at half resolution =====================
        stage_1_output_shape = VideoPixelShape(
            batch=1, frames=num_frames,
            width=width // 2, height=height // 2, fps=frame_rate
        )
        video_encoder = self.model_ledger.video_encoder()
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=device,
        )
        torch.cuda.synchronize()
        del video_encoder
        cleanup_memory()

        transformer = self.model_ledger.transformer()
        # Use DISTILLED_SIGMA_VALUES for 8 steps at half resolution
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=device)
        stepper = Res2sDiffusionStep()
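        # The distilled sigma schedules are fixed, few-step noise-level lists
        # shipped with the model (8 values for stage 1, 3 for stage 2, per the
        # class docstring), so no scheduler search happens at runtime.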
        def first_stage_denoising_loop(sigmas, video_state, audio_state, stepper):
            return res2s_audio_video_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=multi_modal_guider_denoising_func(
                    video_guider=MultiModalGuider(params=video_guider_params, negative_context=v_context_n),
                    audio_guider=MultiModalGuider(params=audio_guider_params, negative_context=a_context_n),
                    v_context=v_context_p,
                    a_context=a_context_p,
                    transformer=transformer,
                ),
            )

        video_state, audio_state = denoise_audio_video(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=first_stage_denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=device,
        )
        torch.cuda.synchronize()
        del transformer
        cleanup_memory()
        # ===================== UPSCALING =====================
        video_encoder = self.model_ledger.video_encoder()
        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )
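        # The spatial upsampler doubles the latent's spatial dimensions, so
        # the half-resolution stage-1 latent becomes the full-resolution
        # starting point for stage 2.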
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=device,
        )
        torch.cuda.synchronize()
        del video_encoder
        cleanup_memory()

        # ===================== STAGE 2: 3 steps at full resolution =====================
        transformer = self.model_ledger.transformer()
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=device)
        def second_stage_denoising_loop(sigmas, video_state, audio_state, stepper):
            return res2s_audio_video_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=v_context_p,
                    audio_context=a_context_p,
                    transformer=transformer,
                ),
            )

        video_state, audio_state = denoise_audio_video(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=second_stage_denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=audio_state.latent,
        )
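        # noise_scale=stage_2_sigmas[0] re-noises the upscaled latent to the
        # first stage-2 noise level, so the 3 remaining steps refine the
        # existing video rather than regenerating it from scratch.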
        torch.cuda.synchronize()
        del transformer
        cleanup_memory()

        # ===================== DECODE =====================
        decoded_video = vae_decode_video(
            video_state.latent, self.model_ledger.video_decoder(), tiling_config, generator
        )
        decoded_audio = vae_decode_audio(
            audio_state.latent, self.model_ledger.audio_decoder(), self.model_ledger.vocoder()
        )
        return decoded_video, decoded_audio
# =============================================================================
# Model Download
# =============================================================================
print("=" * 80)
print("Downloading LTX-2.3 HQ models...")
print("=" * 80)

weights_dir = Path("weights")
weights_dir.mkdir(exist_ok=True)

checkpoint_path = hf_hub_download(
    repo_id=LTX_MODEL_REPO,
    filename="ltx-2.3-22b-distilled-1.1.safetensors",
    local_dir=str(weights_dir),
    local_dir_use_symlinks=False,  # Ensure an actual file copy, not a symlink
)
# Retry with force_download if the first attempt left no file on disk
if not os.path.exists(checkpoint_path):
    print(f"Re-downloading checkpoint to {weights_dir}...")
    checkpoint_path = hf_hub_download(
        repo_id=LTX_MODEL_REPO,
        filename="ltx-2.3-22b-distilled-1.1.safetensors",
        local_dir=str(weights_dir),
        local_dir_use_symlinks=False,
        force_download=True,
    )
print(f"Checkpoint at: {checkpoint_path}")
print(f"File exists: {os.path.exists(checkpoint_path)}")
print(f"File size: {os.path.getsize(checkpoint_path) / 1024**3:.2f} GB")

spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.1.safetensors")
distilled_lora_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled-lora-384.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)

print(f"Distilled checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"Distilled LoRA: {distilled_lora_path}")
print(f"Gemma root: {gemma_root}")
# =============================================================================
# Download Custom LoRAs
# =============================================================================
LORA_REPO = "dagloop5/LoRA"

print("=" * 80)
print("Downloading custom LoRA adapters...")
print("=" * 80)

pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_reasoning_I2V_V3.safetensors")
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")
mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")
dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")
fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")
liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")
demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
transition_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2_takerpov_lora_v1.2.safetensors")

print("All 12 custom LoRAs downloaded (plus the distilled LoRA)")
print("=" * 80)
# =============================================================================
# Pipeline Initialization
# =============================================================================
print("Initializing HQ Pipeline...")
pipeline = HQPipelineWithCachedLoRA(
    checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    quantization=QuantizationPolicy.fp8_cast(),
)
print("Pipeline initialized!")
print("=" * 80)
# =============================================================================
# ZeroGPU Tensor Preloading - Single Transformer
# =============================================================================
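# On ZeroGPU Spaces, models instantiated at startup (before any @spaces.GPU
# call) can be packed and attached quickly whenever a GPU is allocated.
# Loading everything once here, then pinning the instances below, avoids
# re-reading checkpoints from disk on every request.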
| print("Preloading models for ZeroGPU tensor packing...") | |
| # Load shared components | |
| _video_encoder = pipeline.model_ledger.video_encoder() | |
| _video_decoder = pipeline.model_ledger.video_decoder() | |
| _audio_encoder = pipeline.model_ledger.audio_encoder() | |
| _audio_decoder = pipeline.model_ledger.audio_decoder() | |
| _vocoder = pipeline.model_ledger.vocoder() | |
| _spatial_upsampler = pipeline.model_ledger.spatial_upsampler() | |
| _text_encoder = pipeline.model_ledger.text_encoder() | |
| _embeddings_processor = pipeline.model_ledger.gemma_embeddings_processor() | |
| # Load the SINGLE transformer | |
| _transformer = pipeline.model_ledger.transformer() | |
| # Replace ledger methods with lambdas returning cached instances | |
| pipeline.model_ledger.video_encoder = lambda: _video_encoder | |
| pipeline.model_ledger.video_decoder = lambda: _video_decoder | |
| pipeline.model_ledger.audio_encoder = lambda: _audio_encoder | |
| pipeline.model_ledger.audio_decoder = lambda: _audio_decoder | |
| pipeline.model_ledger.vocoder = lambda: _vocoder | |
| pipeline.model_ledger.spatial_upsampler = lambda: _spatial_upsampler | |
| pipeline.model_ledger.text_encoder = lambda: _text_encoder | |
| pipeline.model_ledger.gemma_embeddings_processor = lambda: _embeddings_processor | |
| pipeline.model_ledger.transformer = lambda: _transformer | |
| print("All models preloaded for ZeroGPU tensor packing!") | |
| print("=" * 80) | |
| print("Pipeline ready!") | |
| print("=" * 80) | |
# =============================================================================
# LoRA Cache Functions
# =============================================================================
LORA_CACHE_DIR = Path("lora_cache")
LORA_CACHE_DIR.mkdir(exist_ok=True)
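# The cache is keyed by a SHA-256 of the checkpoint path plus every LoRA
# strength, so any change to a single slider produces a new fused snapshot
# while identical settings reuse the saved .safetensors file.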
def prepare_lora_cache(
    distilled_strength: float,
    pose_strength: float, general_strength: float, motion_strength: float,
    dreamlay_strength: float, mself_strength: float, dramatic_strength: float,
    fluid_strength: float, liquid_strength: float, demopose_strength: float,
    voice_strength: float, realism_strength: float, transition_strength: float,
    progress=gr.Progress(track_tqdm=True),
):
    """Build cached LoRA state for single transformer."""
    global pipeline

    print("[LoRA] === Starting LoRA Cache Preparation ===")
    progress(0.05, desc="Preparing LoRA cache...")

    # Validate all LoRA files exist
    print("[LoRA] Validating LoRA file paths...")
    lora_files = [
        ("Distilled", distilled_lora_path, distilled_strength),
        ("Pose", pose_lora_path, pose_strength),
        ("General", general_lora_path, general_strength),
        ("Motion", motion_lora_path, motion_strength),
        ("Dreamlay", dreamlay_lora_path, dreamlay_strength),
        ("Mself", mself_lora_path, mself_strength),
        ("Dramatic", dramatic_lora_path, dramatic_strength),
        ("Fluid", fluid_lora_path, fluid_strength),
        ("Liquid", liquid_lora_path, liquid_strength),
        ("Demopose", demopose_lora_path, demopose_strength),
        ("Voice", voice_lora_path, voice_strength),
        ("Realism", realism_lora_path, realism_strength),
        ("Transition", transition_lora_path, transition_strength),
    ]
    active_loras = []
    for name, path, strength in lora_files:
        if path is not None and float(strength) != 0.0:
            active_loras.append((name, path, strength))
            print(f"[LoRA] - {name}: strength={strength}")
    print(f"[LoRA] Active LoRAs: {len(active_loras)}")

    key_str = (
        f"{checkpoint_path}:{distilled_strength}:{pose_strength}:{general_strength}:{motion_strength}:"
        f"{dreamlay_strength}:{mself_strength}:{dramatic_strength}:{fluid_strength}:{liquid_strength}:"
        f"{demopose_strength}:{voice_strength}:{realism_strength}:{transition_strength}"
    )
    key = hashlib.sha256(key_str.encode()).hexdigest()
    cache_path = LORA_CACHE_DIR / f"{key}.safetensors"
    print(f"[LoRA] Cache key: {key[:16]}...")
    print(f"[LoRA] Cache path: {cache_path}")

    if cache_path.exists():
        print("[LoRA] Loading from existing cache...")
        progress(0.20, desc="Loading cached LoRA state...")
        state = load_file(str(cache_path))
        print(f"[LoRA] Loaded state dict with {len(state)} keys, size: {sum(v.numel() * v.element_size() for v in state.values()) / 1024**3:.2f} GB")
        pipeline.apply_cached_lora_state(state)
        print("[LoRA] State applied to pipeline._cached_state")
        print("[LoRA] === LoRA Cache Preparation Complete ===")
        return f"Loaded cached LoRA state: {cache_path.name} ({len(state)} keys)"
    if not active_loras:
        print("[LoRA] No non-zero LoRA strengths selected; nothing to prepare.")
        print("[LoRA] === LoRA Cache Preparation Complete (no LoRAs) ===")
        return "No non-zero LoRA strengths selected; nothing to prepare."

    # active_loras already holds exactly the (name, path, strength) triples we
    # need, so build the loader entries from it directly.
    loras_for_builder = [
        LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
        for _name, path, strength in active_loras
    ]
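    # Judging by its name in ltx_core.loader, LTXV_LORA_COMFY_RENAMING_MAP
    # maps ComfyUI-style LoRA tensor names onto this repo's key layout before
    # the weights are fused.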
| print(f"[LoRA] Building fused state on CPU with {len(loras_for_builder)} LoRAs...") | |
| print("[LoRA] This may take several minutes (do not close the Space)...") | |
| progress(0.35, desc="Building fused state (CPU)...") | |
| import time | |
| start_time = time.time() | |
| tmp_ledger = pipeline.model_ledger.__class__( | |
| dtype=torch.bfloat16, | |
| device=torch.device("cpu"), | |
| checkpoint_path=str(checkpoint_path), | |
| spatial_upsampler_path=str(spatial_upsampler_path), | |
| gemma_root_path=str(gemma_root), | |
| loras=tuple(loras_for_builder), | |
| quantization=None, | |
| ) | |
| print(f"[LoRA] Temporary ledger created in {time.time() - start_time:.1f}s") | |
| print("[LoRA] Loading transformer with LoRAs applied...") | |
| transformer = tmp_ledger.transformer() | |
| print(f"[LoRA] Transformer loaded in {time.time() - start_time:.1f}s") | |
| print("[LoRA] Extracting state dict...") | |
| progress(0.70, desc="Extracting fused stateDict") | |
| state = {k: v.detach().cpu().contiguous() for k, v in transformer.state_dict().items()} | |
| print(f"[LoRA] State dict extracted: {len(state)} keys") | |
| print(f"[LoRA] Saving to cache: {cache_path}") | |
| save_file(state, str(cache_path)) | |
| print(f"[LoRA] Cache saved, size: {sum(v.numel() * v.element_size() for v in state.values()) / 1024**3:.2f} GB") | |
| print("[LoRA] Cleaning up temporary ledger...") | |
| del transformer, tmp_ledger | |
| gc.collect() | |
| print(f"[LoRA] Cleanup complete in {time.time() - start_time:.1f}s total") | |
| print("[LoRA] Applying state to pipeline._cached_state...") | |
| progress(0.90, desc="Applying LoRA state to pipeline...") | |
| pipeline.apply_cached_lora_state(state) | |
| progress(1.0, desc="Done!") | |
| print("[LoRA] === LoRA Cache Preparation Complete ===") | |
| return f"Built and cached LoRA state: {cache_path.name} ({len(state)} keys, {time.time() - start_time:.1f}s)" | |
# =============================================================================
# LoRA State Application (called BEFORE pipeline generation)
# =============================================================================
def apply_prepared_lora_state_to_pipeline():
    """
    Apply the prepared LoRA state from pipeline._cached_state to the preloaded
    transformer. This should be called BEFORE pipeline generation, not during.
    """
    print("[LoRA] === Applying LoRA State to Transformer ===")
    if pipeline._cached_state is None:
        print("[LoRA] No prepared LoRA state available; skipping.")
        print("[LoRA] === LoRA Application Complete (no state) ===")
        return False
    try:
        existing_transformer = _transformer  # The preloaded transformer from globals
        state = pipeline._cached_state
        print(f"[LoRA] Applying state dict with {len(state)} keys...")
        print(f"[LoRA] State dict size: {sum(v.numel() * v.element_size() for v in state.values()) / 1024**3:.2f} GB")

        import time
        start_time = time.time()
        with torch.no_grad():
            missing, unexpected = existing_transformer.load_state_dict(state, strict=False)
        print(f"[LoRA] load_state_dict completed in {time.time() - start_time:.1f}s")

        if missing:
            print(f"[LoRA] WARNING: {len(missing)} keys missing from state dict")
        if unexpected:
            print(f"[LoRA] WARNING: {len(unexpected)} unexpected keys in state dict")
        if not missing and not unexpected:
            print("[LoRA] State dict loaded successfully with no mismatches!")
        print("[LoRA] === LoRA Application Complete (success) ===")
        return True
    except Exception as e:
        print(f"[LoRA] FAILED to apply LoRA state: {type(e).__name__}: {e}")
        print("[LoRA] === LoRA Application Complete (FAILED) ===")
        return False
# =============================================================================
# Helper Functions
# =============================================================================
def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def calculate_frames(duration: float, frame_rate: float = DEFAULT_FRAME_RATE) -> int:
    # Snap to the nearest frame count of the form 8k + 1 (both MIN_FRAMES=9
    # and MAX_FRAMES=721 have this form).
    ideal_frames = int(duration * frame_rate)
    ideal_frames = max(ideal_frames, MIN_FRAMES)
    k = round((ideal_frames - 1) / 8)
    frames = k * 8 + 1
    return min(frames, MAX_FRAMES)
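# Worked example: duration=10.0 s at 24 fps gives ideal_frames=240,
# k=round(239 / 8)=30, so frames = 30 * 8 + 1 = 241 (well under MAX_FRAMES).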
def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))
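# Example: a 1920x1080 upload has ratio ~1.78, closest to 16/9, so the preset
# lookup below returns (1536, 1024) at the high tier.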
def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


# Toggling high-res recomputes the same preset lookup, so reuse the handler.
on_highres_toggle = on_image_upload
def get_gpu_duration(
    first_image,
    last_image,
    prompt: str,
    negative_prompt: str,
    duration: float,
    gpu_duration: float,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    video_cfg_scale: float = 1.0,
    video_stg_scale: float = 0.0,
    video_rescale_scale: float = 0.45,
    video_a2v_scale: float = 3.0,
    audio_cfg_scale: float = 1.0,
    audio_stg_scale: float = 0.0,
    audio_rescale_scale: float = 1.0,
    audio_v2a_scale: float = 3.0,
    distilled_strength: float = 0.0,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    progress=None,
) -> int:
    """Dynamic GPU-duration hook: mirrors generate_video's signature and
    simply returns the user-selected gpu_duration."""
    return int(gpu_duration)
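# ZeroGPU allows @spaces.GPU to take a callable for `duration`; the callable
# receives the same arguments as the decorated function, which is why
# get_gpu_duration mirrors generate_video's full signature.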
@spaces.GPU(duration=get_gpu_duration)
def generate_video(
    first_image,
    last_image,
    prompt: str,
    negative_prompt: str,
    duration: float,
    gpu_duration: float,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    video_cfg_scale: float = 1.0,
    video_stg_scale: float = 0.0,
    video_rescale_scale: float = 0.45,
    video_a2v_scale: float = 3.0,
    audio_cfg_scale: float = 1.0,
    audio_stg_scale: float = 0.0,
    audio_rescale_scale: float = 1.0,
    audio_v2a_scale: float = 3.0,
    distilled_strength: float = 0.0,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    progress=gr.Progress(track_tqdm=True),
):
    # Resolve the seed before entering the try block so the except path can
    # report it even if an early CUDA call raises.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")
        print(f"Using seed: {current_seed}")
        print(f"Resolution: {width}x{height}")
        num_frames = calculate_frames(duration, DEFAULT_FRAME_RATE)
        print(f"Frames: {num_frames} ({duration}s @ {DEFAULT_FRAME_RATE}fps)")
        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                shutil.copy(first_image, temp_first_path)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))
        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                shutil.copy(last_image, temp_last_path)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))

        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
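        # The VAE decodes the video latent in temporal tiles; the chunk count
        # is forwarded to encode_video, which appears to mux the decoded
        # chunks progressively rather than holding the whole clip in memory.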
        video_guider_params = MultiModalGuiderParams(
            cfg_scale=video_cfg_scale,
            stg_scale=video_stg_scale,
            rescale_scale=video_rescale_scale,
            modality_scale=video_a2v_scale,
            skip_step=0,
            stg_blocks=[],
        )
        audio_guider_params = MultiModalGuiderParams(
            cfg_scale=audio_cfg_scale,
            stg_scale=audio_stg_scale,
            rescale_scale=audio_rescale_scale,
            modality_scale=audio_v2a_scale,
            skip_step=0,
            stg_blocks=[],
        )
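        # cfg_scale is classic classifier-free guidance against the negative
        # prompt; stg_scale is the spatiotemporal guidance strength; and
        # modality_scale couples the two streams (audio-to-video for the video
        # guider, video-to-audio for the audio guider, per the A2V/V2A sliders).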
| log_memory("before pipeline call") | |
| apply_prepared_lora_state_to_pipeline() | |
| video, audio = pipeline( | |
| prompt=prompt, | |
| negative_prompt=negative_prompt, | |
| seed=current_seed, | |
| height=height, | |
| width=width, | |
| num_frames=num_frames, | |
| frame_rate=DEFAULT_FRAME_RATE, | |
| video_guider_params=video_guider_params, | |
| audio_guider_params=audio_guider_params, | |
| images=images, | |
| tiling_config=tiling_config, | |
| ) | |
| log_memory("after pipeline call") | |
| output_path = tempfile.mktemp(suffix=".mp4") | |
| encode_video( | |
| video=video, | |
| fps=DEFAULT_FRAME_RATE, | |
| audio=audio, | |
| output_path=output_path, | |
| video_chunks_number=video_chunks_number, | |
| ) | |
| log_memory("after encode_video") | |
| return str(output_path), current_seed | |
| except Exception as e: | |
| import traceback | |
| log_memory("on error") | |
| print(f"Error: {str(e)}\n{traceback.format_exc()}") | |
| return None, current_seed | |
# =============================================================================
# Gradio UI
# =============================================================================
css = """
.fillable {max-width: 1200px !important}
.progress-text {color: black}
"""

with gr.Blocks(title="LTX-2.3 Two-Stage HQ with LoRA Cache", theme=gr.themes.Citrus(), css=css) as demo:
    gr.Markdown("# LTX-2.3 Two-Stage HQ Video Generation with LoRA Cache")
    gr.Markdown(
        "High-quality text/image-to-video with cached LoRA state + CFG guidance. "
        "[[Model]](https://huggingface.co/Lightricks/LTX-2.3)"
    )
    with gr.Row():
        # LEFT SIDE: Input Controls
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")
            prompt = gr.Textbox(
                label="Prompt",
                value=DEFAULT_PROMPT,
                lines=3,
            )
            negative_prompt = gr.Textbox(
                label="Negative Prompt",
                value=DEFAULT_NEGATIVE_PROMPT,
                lines=2,
            )
            duration = gr.Slider(
                label="Duration (seconds)",
                minimum=1.0, maximum=30.0, value=10.0, step=0.1,
            )
            with gr.Row():
                seed = gr.Number(label="Seed", value=42, precision=0, minimum=0, maximum=MAX_SEED)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
            with gr.Row():
                high_res = gr.Checkbox(label="High Resolution", value=True)
            with gr.Row():
                width = gr.Number(label="Width", value=1536, precision=0)
                height = gr.Number(label="Height", value=1024, precision=0)
            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
            with gr.Accordion("Advanced Settings", open=False):
                gr.Markdown("### Video Guidance Parameters")
                with gr.Row():
                    video_cfg_scale = gr.Slider(
                        label="Video CFG Scale", minimum=1.0, maximum=10.0,
                        value=LTX_2_3_HQ_PARAMS.video_guider_params.cfg_scale, step=0.1
                    )
                    video_stg_scale = gr.Slider(
                        label="Video STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
                    )
                with gr.Row():
                    video_rescale_scale = gr.Slider(
                        label="Video Rescale", minimum=0.0, maximum=2.0, value=0.45, step=0.1
                    )
                    video_a2v_scale = gr.Slider(
                        label="A2V Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
                    )
                gr.Markdown("### Audio Guidance Parameters")
                with gr.Row():
                    audio_cfg_scale = gr.Slider(
                        label="Audio CFG Scale", minimum=1.0, maximum=15.0,
                        value=LTX_2_3_HQ_PARAMS.audio_guider_params.cfg_scale, step=0.1
                    )
                    audio_stg_scale = gr.Slider(
                        label="Audio STG Scale", minimum=0.0, maximum=2.0, value=0.0, step=0.1
                    )
                with gr.Row():
                    audio_rescale_scale = gr.Slider(
                        label="Audio Rescale", minimum=0.0, maximum=2.0, value=1.0, step=0.1
                    )
                    audio_v2a_scale = gr.Slider(
                        label="V2A Scale", minimum=0.0, maximum=5.0, value=3.0, step=0.1
                    )
        # RIGHT SIDE: Output and LoRA
        with gr.Column():
            output_video = gr.Video(label="Generated Video", autoplay=False)
            gpu_duration = gr.Slider(
                label="ZeroGPU duration (seconds)",
                minimum=30.0, maximum=240.0, value=90.0, step=1.0,
                info="Increase for longer videos, higher resolution, or LoRA usage"
            )
            gr.Markdown("### LoRA Adapter Strengths")
            gr.Markdown("Set to 0 to disable, then click 'Prepare LoRA Cache'")
            with gr.Row():
                distilled_strength = gr.Slider(label="Distilled LoRA", minimum=0.0, maximum=1.5, value=0.0, step=0.01)
                pose_strength = gr.Slider(label="Anthro Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
            with gr.Row():
                general_strength = gr.Slider(label="Reasoning Enhancer", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                motion_strength = gr.Slider(label="Anthro Posing", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
            with gr.Row():
                dreamlay_strength = gr.Slider(label="Dreamlay", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                mself_strength = gr.Slider(label="Mself", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
            with gr.Row():
                dramatic_strength = gr.Slider(label="Dramatic", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                fluid_strength = gr.Slider(label="Fluid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
            with gr.Row():
                liquid_strength = gr.Slider(label="Liquid Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                demopose_strength = gr.Slider(label="Audio Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
            with gr.Row():
                voice_strength = gr.Slider(label="Voice Helper", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                realism_strength = gr.Slider(label="Anthro Realism", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
            with gr.Row():
                transition_strength = gr.Slider(label="POV", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                gr.Markdown("")  # Spacer for alignment
            prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
            lora_status = gr.Textbox(
                label="LoRA Cache Status",
                value="No LoRA state prepared yet.",
                interactive=False,
            )
    # Event handlers
    first_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
    last_image.change(fn=on_image_upload, inputs=[first_image, last_image, high_res], outputs=[width, height])
    high_res.change(fn=on_highres_toggle, inputs=[first_image, last_image, high_res], outputs=[width, height])
    prepare_lora_btn.click(
        fn=prepare_lora_cache,
        inputs=[distilled_strength, pose_strength, general_strength, motion_strength, dreamlay_strength,
                mself_strength, dramatic_strength, fluid_strength, liquid_strength,
                demopose_strength, voice_strength, realism_strength, transition_strength],
        outputs=[lora_status],
    )
    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, prompt, negative_prompt, duration, gpu_duration,
            seed, randomize_seed, height, width,
            video_cfg_scale, video_stg_scale, video_rescale_scale, video_a2v_scale,
            audio_cfg_scale, audio_stg_scale, audio_rescale_scale, audio_v2a_scale,
            distilled_strength, pose_strength, general_strength, motion_strength,
            dreamlay_strength, mself_strength, dramatic_strength, fluid_strength,
            liquid_strength, demopose_strength, voice_strength, realism_strength,
            transition_strength,
        ],
        outputs=[output_video, seed],
    )
if __name__ == "__main__":
    # theme and css are gr.Blocks() arguments (set above), not launch() arguments.
    demo.queue().launch(mcp_server=False)