dagloop5 committed on
Commit 283039a · verified · Parent(s): 4df4f5d

Create app.py

Files changed (1)
  1. app.py +891 -0
app.py ADDED
@@ -0,0 +1,891 @@
import os
import subprocess
import sys

# Disable torch.compile / dynamo before any torch import
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

# Install xformers for memory-efficient attention
subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)

# Clone LTX-2 repo and install packages
LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")

LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
    subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)

print("Installing ltx-core and ltx-pipelines from cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
     os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)

sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))

import logging
import random
import tempfile
from pathlib import Path
import gc
import hashlib
import json

import requests
import torch
torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download, snapshot_download
# json/requests/safe_open are used below for the checkpoint header fetch and FP8 conversion.
from safetensors import safe_open
from safetensors.torch import load_file, save_file

from ltx_core.components.diffusion_steps import EulerDiffusionStep
from ltx_core.components.noisers import GaussianNoiser
from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
from ltx_core.quantization import QuantizationPolicy
from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
from ltx_pipelines.distilled import DistilledPipeline
from ltx_pipelines.utils import euler_denoising_loop
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
from ltx_pipelines.utils.helpers import (
    cleanup_memory,
    combined_image_conditionings,
    denoise_video_only,
    encode_prompts,
    simple_denoising_func,
)
from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP

# Force-patch xformers attention into the LTX attention module.
from ltx_core.model.transformer import attention as _attn_mod
print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
try:
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
except Exception as e:
    print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")

logging.getLogger().setLevel(logging.INFO)

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "An astronaut hatches from a fragile egg on the surface of the Moon, "
    "the shell cracking and peeling apart in gentle low-gravity motion. "
    "Fine lunar dust lifts and drifts outward with each movement, floating "
    "in slow arcs before settling back onto the ground."
)
DEFAULT_FRAME_RATE = 24.0

# Resolution presets: (width, height)
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024), "9:7": (1408, 1088), "7:9": (1088, 1408), "19:13": (1472, 1008), "13:19": (1008, 1472)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768), "9:7": (704, 544), "7:9": (544, 704), "19:13": (736, 504), "13:19": (504, 736)},
}


class LTX23DistilledA2VPipeline(DistilledPipeline):
    """DistilledPipeline with optional audio conditioning."""

    def __call__(
        self,
        prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        images: list[ImageConditioningInput],
        audio_path: str | None = None,
        tiling_config: TilingConfig | None = None,
        enhance_prompt: bool = False,
    ):
        # Standard path when no audio input is provided.
        print(prompt)  # log the incoming prompt
        if audio_path is None:
            return super().__call__(
                prompt=prompt,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=frame_rate,
                images=images,
                tiling_config=tiling_config,
                enhance_prompt=enhance_prompt,
            )

        generator = torch.Generator(device=self.device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)
        stepper = EulerDiffusionStep()
        dtype = torch.bfloat16

        (ctx_p,) = encode_prompts(
            [prompt],
            self.model_ledger,
            enhance_first_prompt=enhance_prompt,
            enhance_prompt_image=images[0].path if len(images) > 0 else None,
        )
        video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding

        video_duration = num_frames / frame_rate
        decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
        if decoded_audio is None:
            raise ValueError(f"Could not extract audio stream from {audio_path}")

        encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
        audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
        expected_frames = audio_shape.frames
        actual_frames = encoded_audio_latent.shape[2]

        # Trim or zero-pad the audio latent to the expected frame count.
        if actual_frames > expected_frames:
            encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
        elif actual_frames < expected_frames:
            pad = torch.zeros(
                encoded_audio_latent.shape[0],
                encoded_audio_latent.shape[1],
                expected_frames - actual_frames,
                encoded_audio_latent.shape[3],
                device=encoded_audio_latent.device,
                dtype=encoded_audio_latent.dtype,
            )
            encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)

        video_encoder = self.model_ledger.video_encoder()
        transformer = self.model_ledger.transformer()
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)

        def denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=video_context,
                    audio_context=audio_context,
                    transformer=transformer,
                ),
            )

        # Stage 1: denoise at half resolution.
        stage_1_output_shape = VideoPixelShape(
            batch=1,
            frames=num_frames,
            width=width // 2,
            height=height // 2,
            fps=frame_rate,
        )
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        cleanup_memory()

        # Stage 2: upsample the stage-1 latent 2x, then denoise at full resolution.
        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        del transformer
        del video_encoder
        cleanup_memory()

        decoded_video = vae_decode_video(
            video_state.latent,
            self.model_ledger.video_decoder(),
            tiling_config,
            generator,
        )
        # Return the caller's input audio unchanged (it is only used as
        # conditioning); drop the batch dimension for encode_video.
        original_audio = Audio(
            waveform=decoded_audio.waveform.squeeze(0),
            sampling_rate=decoded_audio.sampling_rate,
        )
        return decoded_video, original_audio


# Model repos
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"
EROS_REPO = "dagloop5/LoRA"
EROS_FILE = "ltx2310eros_beta.safetensors"

# Download model checkpoints
print("=" * 80)
print("Downloading LTX-2.3 distilled model + Gemma...")
print("=" * 80)

# LoRA cache directory and currently-applied key
LORA_CACHE_DIR = Path("lora_cache")
LORA_CACHE_DIR.mkdir(exist_ok=True)
current_lora_key: str | None = None

PENDING_LORA_KEY: str | None = None
PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
PENDING_LORA_STATUS: str = "No LoRA state prepared yet."

# --- 1. 10Eros checkpoint: FP8→BF16 conversion + DEV metadata injection ---
# Official DEV checkpoint is BF16. fp8_cast() expects BF16 input.
# 10Eros is FP8 (CivitAI format) → convert to BF16 to match official dtype distribution.
print("[1/4] Preparing 10Eros checkpoint...")
eros_fp8_path = hf_hub_download(repo_id=EROS_REPO, filename=EROS_FILE)
print(f"  Downloaded: {eros_fp8_path}")

EROS_FIXED = "/tmp/eros_bf16_with_meta.safetensors"
if os.path.exists(EROS_FIXED):
    print("  Using cached BF16 checkpoint")
else:
    # Fetch DEV checkpoint metadata from the header only (first 2MB, not the full 46GB)
    print("  Fetching DEV checkpoint metadata (header only)...")
    dev_url = f"https://huggingface.co/{LTX_MODEL_REPO}/resolve/main/ltx-2.3-22b-dev.safetensors"
    hdr_resp = requests.get(dev_url, headers={"Range": "bytes=0-2000000"}, timeout=30)
    hdr_size = int.from_bytes(hdr_resp.content[:8], "little")
    hdr_json = json.loads(hdr_resp.content[8:8 + min(hdr_size, len(hdr_resp.content) - 8)])
    dev_metadata = hdr_json.get("__metadata__", {})
    print(f"  DEV metadata keys: {list(dev_metadata.keys())}")

    # Convert FP8→BF16 + inject metadata
    print("  Converting FP8→BF16 (lossless upcast)...")
    _fp8_types = {torch.float8_e4m3fn, torch.float8_e5m2}
    tensors = {}
    _converted = 0
    with safe_open(eros_fp8_path, framework="pt") as f:
        for key in f.keys():
            tensor = f.get_tensor(key)
            if tensor.dtype in _fp8_types:
                tensors[key] = tensor.to(torch.bfloat16)
                _converted += 1
            else:
                tensors[key] = tensor
    print(f"  Converted {_converted} FP8→BF16, kept {len(tensors) - _converted} as-is")
    save_file(tensors, EROS_FIXED, metadata=dev_metadata)
    del tensors
    gc.collect()
    print("  Saved with DEV metadata")

checkpoint_path = EROS_FIXED

spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)

# ---- LoRA downloads ----
# LoRA repo + download the requested LoRA adapters
LORA_REPO = "dagloop5/LoRA"

print("=" * 80)
print("Downloading LoRA adapters from dagloop5/LoRA...")
print("=" * 80)
pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_VBVR_Reasoning_I2V_V2.safetensors")
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # Hyperfap
dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # "[He | She] is having am orgasm." (am or an?)
fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")  # cr3ampi3 animation., missionary animation, doggystyle bouncy animation, double penetration animation
liquid_lora_path = hf_hub_download(repo_id="valiantcat/LTX-2.3-Transition-LORA", filename="ltx2.3-transition.safetensors")  # wet dr1pp
demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")

print(f"Pose LoRA: {pose_lora_path}")
print(f"General LoRA: {general_lora_path}")
print(f"Motion LoRA: {motion_lora_path}")
print(f"Dreamlay LoRA: {dreamlay_lora_path}")
print(f"Mself LoRA: {mself_lora_path}")
print(f"Dramatic LoRA: {dramatic_lora_path}")
print(f"Fluid LoRA: {fluid_lora_path}")
print(f"Liquid LoRA: {liquid_lora_path}")
print(f"Demopose LoRA: {demopose_lora_path}")
# ------------------------------------------------------------------

print(f"Checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"Gemma root: {gemma_root}")

# Initialize pipeline WITH text encoder and optional audio support.
pipeline = LTX23DistilledA2VPipeline(
    distilled_checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    loras=[],
    quantization=QuantizationPolicy.fp8_cast(),  # keep FP8 quantization unchanged
)

def _make_lora_key(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
) -> tuple[str, str]:
    rp = round(float(pose_strength), 2)
    rg = round(float(general_strength), 2)
    rm = round(float(motion_strength), 2)
    rd = round(float(dreamlay_strength), 2)
    rs = round(float(mself_strength), 2)
    rr = round(float(dramatic_strength), 2)
    rf = round(float(fluid_strength), 2)
    rl = round(float(liquid_strength), 2)
    ro = round(float(demopose_strength), 2)
    key_str = f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}"
    key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
    return key, key_str


def prepare_lora_cache(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
    progress=gr.Progress(track_tqdm=True),
):
    """
    CPU-only step:
      - checks the on-disk cache,
      - loads a cached fused transformer state_dict, or
      - builds the fused transformer on CPU and saves it.
    The resulting state_dict is stored in memory and can be applied later.
    """
    global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS

    ledger = pipeline.model_ledger
    key, _ = _make_lora_key(pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength)
    cache_path = LORA_CACHE_DIR / f"{key}.safetensors"

    progress(0.05, desc="Preparing LoRA state")
    if cache_path.exists():
        try:
            progress(0.20, desc="Loading cached fused state")
            state = load_file(str(cache_path))
            PENDING_LORA_KEY = key
            PENDING_LORA_STATE = state
            PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
            return PENDING_LORA_STATUS
        except Exception as e:
            print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")

    entries = [
        (pose_lora_path, round(float(pose_strength), 2)),
        (general_lora_path, round(float(general_strength), 2)),
        (motion_lora_path, round(float(motion_strength), 2)),
        (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
        (mself_lora_path, round(float(mself_strength), 2)),
        (dramatic_lora_path, round(float(dramatic_strength), 2)),
        (fluid_lora_path, round(float(fluid_strength), 2)),
        (liquid_lora_path, round(float(liquid_strength), 2)),
        (demopose_lora_path, round(float(demopose_strength), 2)),
    ]
    loras_for_builder = [
        LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
        for path, strength in entries
        if path is not None and float(strength) != 0.0
    ]

    if not loras_for_builder:
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
        return PENDING_LORA_STATUS

    tmp_ledger = None
    new_transformer_cpu = None
    try:
        progress(0.35, desc="Building fused CPU transformer")
        tmp_ledger = pipeline.model_ledger.__class__(
            dtype=ledger.dtype,
            device=torch.device("cpu"),
            checkpoint_path=str(checkpoint_path),
            spatial_upsampler_path=str(spatial_upsampler_path),
            gemma_root_path=str(gemma_root),
            loras=tuple(loras_for_builder),
            quantization=getattr(ledger, "quantization", None),
        )
        new_transformer_cpu = tmp_ledger.transformer()

        progress(0.70, desc="Extracting fused state_dict")
        state = {
            k: v.detach().cpu().contiguous()
            for k, v in new_transformer_cpu.state_dict().items()
        }
        save_file(state, str(cache_path))

        PENDING_LORA_KEY = key
        PENDING_LORA_STATE = state
        PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
        return PENDING_LORA_STATUS

    except Exception as e:
        import traceback
        print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
        print(traceback.format_exc())
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
        return PENDING_LORA_STATUS

    finally:
        try:
            del new_transformer_cpu
        except Exception:
            pass
        try:
            del tmp_ledger
        except Exception:
            pass
        gc.collect()


def apply_prepared_lora_state_to_pipeline():
    """
    Fast step: copy the already prepared CPU state into the live transformer.
    This is the only part that should remain near generation time.
    """
    global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE

    if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
        print("[LoRA] No prepared LoRA state available; skipping.")
        return False

    if current_lora_key == PENDING_LORA_KEY:
        print("[LoRA] Prepared LoRA state already active; skipping.")
        return True

    existing_transformer = _transformer
    with torch.no_grad():
        missing, unexpected = existing_transformer.load_state_dict(PENDING_LORA_STATE, strict=False)
    if missing or unexpected:
        print(f"[LoRA] load_state_dict mismatch: missing={len(missing)}, unexpected={len(unexpected)}")

    current_lora_key = PENDING_LORA_KEY
    print("[LoRA] Prepared LoRA state applied to the pipeline.")
    return True

# ---- Preload block ----
# Preload all models for ZeroGPU tensor packing.
print("Preloading all models (including Gemma and audio components)...")
ledger = pipeline.model_ledger

# Save the original factory methods so we can rebuild individual components later.
# These are bound callables on ledger that will call the builder when invoked.
_orig_transformer_factory = ledger.transformer
_orig_video_encoder_factory = ledger.video_encoder
_orig_video_decoder_factory = ledger.video_decoder
_orig_audio_encoder_factory = ledger.audio_encoder
_orig_audio_decoder_factory = ledger.audio_decoder
_orig_vocoder_factory = ledger.vocoder
_orig_spatial_upsampler_factory = ledger.spatial_upsampler
_orig_text_encoder_factory = ledger.text_encoder
_orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor

# Call the original factories once to create the cached instances we will serve by default.
_transformer = _orig_transformer_factory()
_video_encoder = _orig_video_encoder_factory()
_video_decoder = _orig_video_decoder_factory()
_audio_encoder = _orig_audio_encoder_factory()
_audio_decoder = _orig_audio_decoder_factory()
_vocoder = _orig_vocoder_factory()
_spatial_upsampler = _orig_spatial_upsampler_factory()
_text_encoder = _orig_text_encoder_factory()
_embeddings_processor = _orig_gemma_embeddings_factory()

# Replace ledger methods with lightweight lambdas that return the cached instances.
# We keep the original factories above so we can call them later to rebuild components.
ledger.transformer = lambda: _transformer
ledger.video_encoder = lambda: _video_encoder
ledger.video_decoder = lambda: _video_decoder
ledger.audio_encoder = lambda: _audio_encoder
ledger.audio_decoder = lambda: _audio_decoder
ledger.vocoder = lambda: _vocoder
ledger.spatial_upsampler = lambda: _spatial_upsampler
ledger.text_encoder = lambda: _text_encoder
ledger.gemma_embeddings_processor = lambda: _embeddings_processor

print("All models preloaded (including Gemma text encoder and audio encoder)!")
# ---- End preload block ----
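
# NOTE (assumption): building everything at import time is what makes this
# viable on ZeroGPU: components instantiated before the first @spaces.GPU
# call can be packed and attached to the GPU quickly, instead of being
# rebuilt inside the reserved GPU window.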

print("=" * 80)
print("Pipeline ready!")
print("=" * 80)


def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))
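# Example: a 1920x1080 PIL image gives ratio ~1.78, closest to 16/9, so this
# returns "16:9"; a 1080x1920 portrait gives "9:16". Only these three ratios
# are auto-detected; the other RESOLUTIONS presets are reached by setting
# Width/Height manually.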


def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


def on_highres_toggle(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


def get_gpu_duration(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    progress=None,
):
    return int(gpu_duration)
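
# NOTE (assumption): spaces.GPU accepts a callable for `duration`, invoked
# with the same arguments as the wrapped function and returning the number of
# seconds to reserve; that is why get_gpu_duration mirrors generate_video's
# full signature while only reading gpu_duration.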

@spaces.GPU(duration=get_gpu_duration)
@torch.inference_mode()
def generate_video(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    progress=gr.Progress(track_tqdm=True),
):
    # Resolve the seed before entering the try block so the except handler
    # can always return it.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")

        frame_rate = DEFAULT_FRAME_RATE
        num_frames = int(duration * frame_rate) + 1
        num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
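        # The rounding above forces num_frames onto the 8k+1 grid the LTX
        # video VAE appears to require (8x temporal compression plus the
        # first frame). Example: duration=10.0 at 24 fps gives 241 frames,
        # already valid; duration=1.2 gives 29 -> ((28 + 7) // 8) * 8 + 1 = 33.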

        print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")

        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                temp_first_path = Path(first_image)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))

        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                temp_last_path = Path(last_image)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))

        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)

        log_memory("before pipeline call")

        apply_prepared_lora_state_to_pipeline()

        video, audio = pipeline(
            prompt=prompt,
            seed=current_seed,
            height=int(height),
            width=int(width),
            num_frames=num_frames,
            frame_rate=frame_rate,
            images=images,
            audio_path=input_audio,
            tiling_config=tiling_config,
            enhance_prompt=enhance_prompt,
        )

        log_memory("after pipeline call")

        output_path = tempfile.mktemp(suffix=".mp4")
        encode_video(
            video=video,
            fps=frame_rate,
            audio=audio,
            output_path=output_path,
            video_chunks_number=video_chunks_number,
        )

        log_memory("after encode_video")
        return str(output_path), current_seed

    except Exception as e:
        import traceback
        log_memory("on error")
        print(f"Error: {str(e)}\n{traceback.format_exc()}")
        return None, current_seed


css = """
.fillable{max-width: 1200px !important}
"""

# theme and css are gr.Blocks() options (launch() does not accept them).
with gr.Blocks(title="LTX-2.3 Distilled", theme=gr.themes.Citrus(), css=css) as demo:
    gr.Markdown("# LTX-2.3 F2LF: Fast Audio-Video Generation with Frame Conditioning")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")
            input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
            prompt = gr.Textbox(
                label="Prompt",
                info="For best results, make it as elaborate as possible.",
                value="Make this image come alive with cinematic motion, smooth animation",
                lines=3,
                placeholder="Describe the motion and animation you want...",
            )
            duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=30.0, value=10.0, step=0.1)

            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")

            with gr.Accordion("Advanced Settings", open=False):
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                with gr.Row():
                    width = gr.Number(label="Width", value=1536, precision=0)
                    height = gr.Number(label="Height", value=1024, precision=0)
                with gr.Row():
                    enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
                    high_res = gr.Checkbox(label="High Resolution", value=True)
                with gr.Column():
                    gr.Markdown("### LoRA adapter strengths (set to 0 to disable; slow and WIP)")
                    pose_strength = gr.Slider(
                        label="Anthro Enhancer strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    general_strength = gr.Slider(
                        label="Reasoning Enhancer strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    motion_strength = gr.Slider(
                        label="Anthro Posing Helper strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    dreamlay_strength = gr.Slider(
                        label="Dreamlay strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    mself_strength = gr.Slider(
                        label="Mself strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    dramatic_strength = gr.Slider(
                        label="Dramatic strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    fluid_strength = gr.Slider(
                        label="Fluid Helper strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    liquid_strength = gr.Slider(
                        label="Transition Helper strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    demopose_strength = gr.Slider(
                        label="Audio Helper strength",
                        minimum=0.0, maximum=2.0, value=0.0, step=0.01,
                    )
                    prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
                    lora_status = gr.Textbox(
                        label="LoRA Cache Status",
                        value="No LoRA state prepared yet.",
                        interactive=False,
                    )

        with gr.Column():
            output_video = gr.Video(label="Generated Video", autoplay=False)
            gpu_duration = gr.Slider(
                label="ZeroGPU duration (seconds; a 10 s Img2Vid at 1024x1024 with LoRAs needs ~70)",
                minimum=30.0,
                maximum=240.0,
                value=75.0,
                step=1.0,
            )

    gr.Examples(
        examples=[
            [
                None,
                "pinkknit.jpg",
                None,
                "The camera falls downward through darkness as if dropped into a tunnel. "
                "As it slows, five friends wearing pink knitted hats and sunglasses lean "
                "over and look down toward the camera with curious expressions. The lens "
                "has a strong fisheye effect, creating a circular frame around them. They "
                "crowd together closely, forming a symmetrical cluster while staring "
                "directly into the lens.",
                3.0,
                80.0,
                False,
                42,
                True,
                1024,
                1024,
                0.0,  # pose_strength
                0.0,  # general_strength
                0.0,  # motion_strength
                0.0,  # dreamlay_strength
                0.0,  # mself_strength
                0.0,  # dramatic_strength
                0.0,  # fluid_strength
                0.0,  # liquid_strength
                0.0,  # demopose_strength
            ],
        ],
        inputs=[
            first_image, last_image, input_audio, prompt, duration, gpu_duration,
            enhance_prompt, seed, randomize_seed, height, width,
            pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength,
        ],
    )

    first_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    last_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    high_res.change(
        fn=on_highres_toggle,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    prepare_lora_btn.click(
        fn=prepare_lora_cache,
        inputs=[pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength],
        outputs=[lora_status],
    )

    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, input_audio, prompt, duration, gpu_duration, enhance_prompt,
            seed, randomize_seed, height, width,
            pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength,
        ],
        outputs=[output_video, seed],
    )


if __name__ == "__main__":
    demo.launch()