dagloop5 committed
Commit 7b8c697 · verified · 1 Parent(s): 4083d43

Create app.py

Files changed (1):
  1. app.py +920 -0
app.py ADDED
import os
import subprocess
import sys

# Disable torch.compile / dynamo before any torch import
os.environ["TORCH_COMPILE_DISABLE"] = "1"
os.environ["TORCHDYNAMO_DISABLE"] = "1"

# Install xformers for memory-efficient attention
subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)

# Clone LTX-2 repo and install packages
LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")

LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video

if not os.path.exists(LTX_REPO_DIR):
    print(f"Cloning {LTX_REPO_URL}...")
    subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
    subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)

print("Installing ltx-core and ltx-pipelines from cloned repo...")
subprocess.run(
    [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
     os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
     "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
    check=True,
)

sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
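
# Note: the two environment variables above must be set before the first
# `import torch` (as the comment above says), which is why this bootstrap
# block runs ahead of every other import. The runtime `pip install` /
# `git clone` pattern is a common workaround on Hugging Face Spaces, where
# extra dependencies cannot always be baked into the image.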
import logging
import random
import tempfile
from pathlib import Path
import gc
import hashlib

import torch

torch._dynamo.config.suppress_errors = True
torch._dynamo.config.disable = True

import spaces
import gradio as gr
import numpy as np
from huggingface_hub import hf_hub_download, snapshot_download
from safetensors.torch import load_file, save_file
from safetensors import safe_open
import json
import requests

from ltx_core.components.diffusion_steps import EulerDiffusionStep
from ltx_core.components.noisers import GaussianNoiser
from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
from ltx_core.model.upsampler import upsample_video
from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
from ltx_core.quantization import QuantizationPolicy
from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
from ltx_pipelines.distilled import DistilledPipeline
from ltx_pipelines.utils import euler_denoising_loop
from ltx_pipelines.utils.args import ImageConditioningInput
from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
from ltx_pipelines.utils.helpers import (
    cleanup_memory,
    combined_image_conditionings,
    denoise_video_only,
    encode_prompts,
    simple_denoising_func,
)
from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP

# Force-patch xformers attention into the LTX attention module.
from ltx_core.model.transformer import attention as _attn_mod

print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
try:
    from xformers.ops import memory_efficient_attention as _mea
    _attn_mod.memory_efficient_attention = _mea
    print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
except Exception as e:
    print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")

logging.getLogger().setLevel(logging.INFO)

MAX_SEED = np.iinfo(np.int32).max
DEFAULT_PROMPT = (
    "An astronaut hatches from a fragile egg on the surface of the Moon, "
    "the shell cracking and peeling apart in gentle low-gravity motion. "
    "Fine lunar dust lifts and drifts outward with each movement, floating "
    "in slow arcs before settling back onto the ground."
)
DEFAULT_FRAME_RATE = 24.0

# Resolution presets: (width, height)
RESOLUTIONS = {
    "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
    "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
}


class LTX23DistilledA2VPipeline(DistilledPipeline):
    """DistilledPipeline with optional audio conditioning."""

    def __call__(
        self,
        prompt: str,
        seed: int,
        height: int,
        width: int,
        num_frames: int,
        frame_rate: float,
        images: list[ImageConditioningInput],
        audio_path: str | None = None,
        tiling_config: TilingConfig | None = None,
        enhance_prompt: bool = False,
    ):
        print(prompt)
        # Standard path when no audio input is provided.
        if audio_path is None:
            return super().__call__(
                prompt=prompt,
                seed=seed,
                height=height,
                width=width,
                num_frames=num_frames,
                frame_rate=frame_rate,
                images=images,
                tiling_config=tiling_config,
                enhance_prompt=enhance_prompt,
            )

        generator = torch.Generator(device=self.device).manual_seed(seed)
        noiser = GaussianNoiser(generator=generator)
        stepper = EulerDiffusionStep()
        dtype = torch.bfloat16

        (ctx_p,) = encode_prompts(
            [prompt],
            self.model_ledger,
            enhance_first_prompt=enhance_prompt,
            enhance_prompt_image=images[0].path if len(images) > 0 else None,
        )
        video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding

        video_duration = num_frames / frame_rate
        decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
        if decoded_audio is None:
            raise ValueError(f"Could not extract audio stream from {audio_path}")

        # Trim or zero-pad the encoded audio latent to the expected frame count.
        encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
        audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
        expected_frames = audio_shape.frames
        actual_frames = encoded_audio_latent.shape[2]

        if actual_frames > expected_frames:
            encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
        elif actual_frames < expected_frames:
            pad = torch.zeros(
                encoded_audio_latent.shape[0],
                encoded_audio_latent.shape[1],
                expected_frames - actual_frames,
                encoded_audio_latent.shape[3],
                device=encoded_audio_latent.device,
                dtype=encoded_audio_latent.dtype,
            )
            encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)

        video_encoder = self.model_ledger.video_encoder()
        transformer = self.model_ledger.transformer()
        stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)

        def denoising_loop(sigmas, video_state, audio_state, stepper):
            return euler_denoising_loop(
                sigmas=sigmas,
                video_state=video_state,
                audio_state=audio_state,
                stepper=stepper,
                denoise_fn=simple_denoising_func(
                    video_context=video_context,
                    audio_context=audio_context,
                    transformer=transformer,
                ),
            )

        # Stage 1: denoise at half resolution.
        stage_1_output_shape = VideoPixelShape(
            batch=1,
            frames=num_frames,
            width=width // 2,
            height=height // 2,
            fps=frame_rate,
        )
        stage_1_conditionings = combined_image_conditionings(
            images=images,
            height=stage_1_output_shape.height,
            width=stage_1_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_1_output_shape,
            conditionings=stage_1_conditionings,
            noiser=noiser,
            sigmas=stage_1_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        cleanup_memory()

        # Stage 2: spatially upsample the stage-1 latent, then refine at full resolution.
        upscaled_video_latent = upsample_video(
            latent=video_state.latent[:1],
            video_encoder=video_encoder,
            upsampler=self.model_ledger.spatial_upsampler(),
        )
        stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
        stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
        stage_2_conditionings = combined_image_conditionings(
            images=images,
            height=stage_2_output_shape.height,
            width=stage_2_output_shape.width,
            video_encoder=video_encoder,
            dtype=dtype,
            device=self.device,
        )
        video_state = denoise_video_only(
            output_shape=stage_2_output_shape,
            conditionings=stage_2_conditionings,
            noiser=noiser,
            sigmas=stage_2_sigmas,
            stepper=stepper,
            denoising_loop_fn=denoising_loop,
            components=self.pipeline_components,
            dtype=dtype,
            device=self.device,
            noise_scale=stage_2_sigmas[0],
            initial_video_latent=upscaled_video_latent,
            initial_audio_latent=encoded_audio_latent,
        )

        torch.cuda.synchronize()
        del transformer
        del video_encoder
        cleanup_memory()

        decoded_video = vae_decode_video(
            video_state.latent,
            self.model_ledger.video_decoder(),
            tiling_config,
            generator,
        )
        # Return the original (input) audio alongside the decoded video.
        original_audio = Audio(
            waveform=decoded_audio.waveform.squeeze(0),
            sampling_rate=decoded_audio.sampling_rate,
        )
        return decoded_video, original_audio
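
# A minimal usage sketch for the subclass above (illustrative only; the
# `pipeline` instance is constructed further down in this file):
#
#     video, audio = pipeline(
#         prompt="...",
#         seed=42,
#         height=1024,
#         width=1536,
#         num_frames=241,   # a frame count of the form 8*k + 1
#         frame_rate=24.0,
#         images=[],
#         audio_path="voice.wav",  # optional; None falls back to the stock path
#     )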


# Model repos
LTX_MODEL_REPO = "Lightricks/LTX-2.3"
GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"

# Download model checkpoints
print("=" * 80)
print("Downloading LTX-2.3 distilled model + Gemma...")
print("=" * 80)

# LoRA cache directory and currently-applied key
LORA_CACHE_DIR = Path("lora_cache")
LORA_CACHE_DIR.mkdir(exist_ok=True)
current_lora_key: str | None = None

PENDING_LORA_KEY: str | None = None
PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
PENDING_LORA_STATUS: str = "No LoRA state prepared yet."

weights_dir = Path("weights")
weights_dir.mkdir(exist_ok=True)
checkpoint_path = hf_hub_download(
    repo_id="TenStrip/LTX2.3-10Eros",
    filename="10Eros_v1_bf16.safetensors",
    local_dir=str(weights_dir),
    local_dir_use_symlinks=False,
)
spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.1.safetensors")
gemma_root = snapshot_download(repo_id=GEMMA_REPO)
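# hf_hub_download / snapshot_download cache under the Hub cache directory, so
# repeated Space restarts reuse already-downloaded files instead of
# re-fetching them.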

# ---- Insert block (LoRA downloads) between lines 268 and 269 ----
# LoRA repo + download the requested LoRA adapters
LORA_REPO = "dagloop5/LoRA"

print("=" * 80)
print("Downloading LoRA adapters from dagloop5/LoRA...")
print("=" * 80)
pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_reasoning_I2V_V3.safetensors")
motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # Hyperfap
dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # "[He | She] is having am orgasm." (am or an?)
fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_CREAMPIE_ANIMATION-V0.1.safetensors")  # cum
liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")  # wet dr1pp
demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
transition_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2_takerpov_lora_v1.2.safetensors")  # takerpov1, taker pov
physics_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_Better_Physics_PhysLTX.safetensors")
reasoning_lora_path = hf_hub_download(repo_id="TenStrip/LTX2.3_Distilled_Lora_1.1_Experiments", filename="TenStrip/LTX2.3_Distilled_Lora_1.1_Experiments")

print(f"Pose LoRA: {pose_lora_path}")
print(f"General LoRA: {general_lora_path}")
print(f"Motion LoRA: {motion_lora_path}")
print(f"Dreamlay LoRA: {dreamlay_lora_path}")
print(f"Mself LoRA: {mself_lora_path}")
print(f"Dramatic LoRA: {dramatic_lora_path}")
print(f"Fluid LoRA: {fluid_lora_path}")
print(f"Liquid LoRA: {liquid_lora_path}")
print(f"Demopose LoRA: {demopose_lora_path}")
print(f"Voice LoRA: {voice_lora_path}")
print(f"Realism LoRA: {realism_lora_path}")
print(f"Transition LoRA: {transition_lora_path}")
print(f"Physics LoRA: {physics_lora_path}")
print(f"Reasoning LoRA: {reasoning_lora_path}")
# ----------------------------------------------------------------

print(f"Checkpoint: {checkpoint_path}")
print(f"Spatial upsampler: {spatial_upsampler_path}")
print(f"[Gemma] Root ready: {gemma_root}")

# Initialize pipeline WITH text encoder and optional audio support
# ---- Replace block (pipeline init) lines 275-281 ----
pipeline = LTX23DistilledA2VPipeline(
    distilled_checkpoint_path=checkpoint_path,
    spatial_upsampler_path=spatial_upsampler_path,
    gemma_root=gemma_root,
    loras=[],
    quantization=QuantizationPolicy.fp8_cast(),  # keep FP8 quantization unchanged
)
# ----------------------------------------------------------------


def _make_lora_key(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
    voice_strength: float,
    realism_strength: float,
    transition_strength: float,
    physics_strength: float,
    reasoning_strength: float,
) -> tuple[str, str]:
    rp = round(float(pose_strength), 2)
    rg = round(float(general_strength), 2)
    rm = round(float(motion_strength), 2)
    rd = round(float(dreamlay_strength), 2)
    rs = round(float(mself_strength), 2)
    rr = round(float(dramatic_strength), 2)
    rf = round(float(fluid_strength), 2)
    rl = round(float(liquid_strength), 2)
    ro = round(float(demopose_strength), 2)
    rv = round(float(voice_strength), 2)
    re = round(float(realism_strength), 2)
    rt = round(float(transition_strength), 2)
    ry = round(float(physics_strength), 2)
    ri = round(float(reasoning_strength), 2)
    key_str = (
        f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|"
        f"{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|"
        f"{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}|"
        f"{voice_lora_path}:{rv}|{realism_lora_path}:{re}|{transition_lora_path}:{rt}|"
        f"{physics_lora_path}:{ry}|{reasoning_lora_path}:{ri}"
    )
    key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
    return key, key_str

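# The cache key is a SHA-256 digest over "path:strength" pairs, so changing
# any strength (after rounding to two decimals) or any LoRA file path yields a
# new cache entry, while identical settings map back to the same fused file.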

def prepare_lora_cache(
    pose_strength: float,
    general_strength: float,
    motion_strength: float,
    dreamlay_strength: float,
    mself_strength: float,
    dramatic_strength: float,
    fluid_strength: float,
    liquid_strength: float,
    demopose_strength: float,
    voice_strength: float,
    realism_strength: float,
    transition_strength: float,
    physics_strength: float,
    reasoning_strength: float,
    progress=gr.Progress(track_tqdm=True),
):
    """
    CPU-only step:
    - checks cache
    - loads cached fused transformer state_dict, or
    - builds fused transformer on CPU and saves it
    The resulting state_dict is stored in memory and can be applied later.
    """
    global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS

    ledger = pipeline.model_ledger
    key, _ = _make_lora_key(pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength, physics_strength, reasoning_strength)
    cache_path = LORA_CACHE_DIR / f"{key}.safetensors"

    progress(0.05, desc="Preparing LoRA state")
    if cache_path.exists():
        try:
            progress(0.20, desc="Loading cached fused state")
            state = load_file(str(cache_path))
            PENDING_LORA_KEY = key
            PENDING_LORA_STATE = state
            PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
            return PENDING_LORA_STATUS
        except Exception as e:
            print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")

    entries = [
        (pose_lora_path, round(float(pose_strength), 2)),
        (general_lora_path, round(float(general_strength), 2)),
        (motion_lora_path, round(float(motion_strength), 2)),
        (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
        (mself_lora_path, round(float(mself_strength), 2)),
        (dramatic_lora_path, round(float(dramatic_strength), 2)),
        (fluid_lora_path, round(float(fluid_strength), 2)),
        (liquid_lora_path, round(float(liquid_strength), 2)),
        (demopose_lora_path, round(float(demopose_strength), 2)),
        (voice_lora_path, round(float(voice_strength), 2)),
        (realism_lora_path, round(float(realism_strength), 2)),
        (transition_lora_path, round(float(transition_strength), 2)),
        (physics_lora_path, round(float(physics_strength), 2)),
        (reasoning_lora_path, round(float(reasoning_strength), 2)),
    ]
    loras_for_builder = [
        LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
        for path, strength in entries
        if path is not None and float(strength) != 0.0
    ]

    if not loras_for_builder:
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
        return PENDING_LORA_STATUS

    tmp_ledger = None
    new_transformer_cpu = None
    try:
        progress(0.35, desc="Building fused CPU transformer")
        tmp_ledger = pipeline.model_ledger.__class__(
            dtype=ledger.dtype,
            device=torch.device("cpu"),
            checkpoint_path=str(checkpoint_path),
            spatial_upsampler_path=str(spatial_upsampler_path),
            gemma_root_path=str(gemma_root),
            loras=tuple(loras_for_builder),
            quantization=getattr(ledger, "quantization", None),
        )
        new_transformer_cpu = tmp_ledger.transformer()

        progress(0.70, desc="Extracting fused state_dict")
        state = {
            k: v.detach().cpu().contiguous()
            for k, v in new_transformer_cpu.state_dict().items()
        }
        save_file(state, str(cache_path))

        PENDING_LORA_KEY = key
        PENDING_LORA_STATE = state
        PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
        return PENDING_LORA_STATUS

    except Exception as e:
        import traceback
        print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
        print(traceback.format_exc())
        PENDING_LORA_KEY = None
        PENDING_LORA_STATE = None
        PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
        return PENDING_LORA_STATUS

    finally:
        try:
            del new_transformer_cpu
        except Exception:
            pass
        try:
            del tmp_ledger
        except Exception:
            pass
        gc.collect()

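# Note that each cache entry holds a full fused transformer state_dict, so the
# files in lora_cache/ are checkpoint-sized; distinct strength combinations
# accumulate on disk.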

def apply_prepared_lora_state_to_pipeline():
    """
    Fast step: copy the already prepared CPU state into the live transformer.
    This is the only part that should remain near generation time.
    """
    global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE

    if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
        print("[LoRA] No prepared LoRA state available; skipping.")
        return False

    if current_lora_key == PENDING_LORA_KEY:
        print("[LoRA] Prepared LoRA state already active; skipping.")
        return True

    existing_transformer = _transformer
    with torch.no_grad():
        missing, unexpected = existing_transformer.load_state_dict(PENDING_LORA_STATE, strict=False)
        if missing or unexpected:
            print(f"[LoRA] load_state_dict mismatch: missing={len(missing)}, unexpected={len(unexpected)}")

    current_lora_key = PENDING_LORA_KEY
    print("[LoRA] Prepared LoRA state applied to the pipeline.")
    return True

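# The LoRA flow is deliberately split in two: prepare_lora_cache() does the
# slow fuse/load work on CPU (outside the ZeroGPU window), while
# apply_prepared_lora_state_to_pipeline() only copies the prepared tensors
# into the live transformer right before generation.
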
# ---- REPLACE PRELOAD BLOCK START ----
# Preload all models for ZeroGPU tensor packing.
print("Preloading all models (including Gemma and audio components)...")
ledger = pipeline.model_ledger

# Save the original factory methods so we can rebuild individual components later.
# These are bound callables on ledger that will call the builder when invoked.
_orig_transformer_factory = ledger.transformer
_orig_video_encoder_factory = ledger.video_encoder
_orig_video_decoder_factory = ledger.video_decoder
_orig_audio_encoder_factory = ledger.audio_encoder
_orig_audio_decoder_factory = ledger.audio_decoder
_orig_vocoder_factory = ledger.vocoder
_orig_spatial_upsampler_factory = ledger.spatial_upsampler
_orig_text_encoder_factory = ledger.text_encoder
_orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor

# Call the original factories once to create the cached instances we will serve by default.
_transformer = _orig_transformer_factory()
_video_encoder = _orig_video_encoder_factory()
_video_decoder = _orig_video_decoder_factory()
_audio_encoder = _orig_audio_encoder_factory()
_audio_decoder = _orig_audio_decoder_factory()
_vocoder = _orig_vocoder_factory()
_spatial_upsampler = _orig_spatial_upsampler_factory()
_text_encoder = _orig_text_encoder_factory()
_embeddings_processor = _orig_gemma_embeddings_factory()

# Replace ledger methods with lightweight lambdas that return the cached instances.
# We keep the original factories above so we can call them later to rebuild components.
ledger.transformer = lambda: _transformer
ledger.video_encoder = lambda: _video_encoder
ledger.video_decoder = lambda: _video_decoder
ledger.audio_encoder = lambda: _audio_encoder
ledger.audio_decoder = lambda: _audio_decoder
ledger.vocoder = lambda: _vocoder
ledger.spatial_upsampler = lambda: _spatial_upsampler
ledger.text_encoder = lambda: _text_encoder
ledger.gemma_embeddings_processor = lambda: _embeddings_processor

print("All models preloaded (including Gemma text encoder and audio encoder)!")
# ---- REPLACE PRELOAD BLOCK END ----
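# Memoizing the ledger factories matters here: every component is built
# exactly once at startup so its tensors can be packed for ZeroGPU, and later
# ledger calls (e.g. self.model_ledger.transformer() inside the pipeline)
# return the cached instances instead of rebuilding the models.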

print("=" * 80)
print("Pipeline ready!")
print("=" * 80)


def log_memory(tag: str):
    if torch.cuda.is_available():
        allocated = torch.cuda.memory_allocated() / 1024**3
        peak = torch.cuda.max_memory_allocated() / 1024**3
        free, total = torch.cuda.mem_get_info()
        print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")


def detect_aspect_ratio(image) -> str:
    if image is None:
        return "16:9"
    if hasattr(image, "size"):
        w, h = image.size
    elif hasattr(image, "shape"):
        h, w = image.shape[:2]
    else:
        return "16:9"
    ratio = w / h
    candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
    return min(candidates, key=lambda k: abs(ratio - candidates[k]))


def on_image_upload(first_image, last_image, high_res):
    ref_image = first_image if first_image is not None else last_image
    aspect = detect_aspect_ratio(ref_image)
    tier = "high" if high_res else "low"
    w, h = RESOLUTIONS[tier][aspect]
    return gr.update(value=w), gr.update(value=h)


# The resolution logic is identical whether an image changes or the
# high-resolution checkbox flips, so reuse the same handler.
on_highres_toggle = on_image_upload

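# For example, a 1920x1080 upload has ratio ~1.78, which is nearest to the
# "16:9" preset, so at high resolution the fields become width=1536,
# height=1024.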

def get_gpu_duration(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    physics_strength: float = 0.0,
    reasoning_strength: float = 0.0,
    progress=None,
):
    return int(gpu_duration)

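# spaces.GPU accepts a callable for `duration`; it is invoked with the same
# arguments as the decorated function, which is why get_gpu_duration mirrors
# generate_video's full signature just to read the user-chosen slider value.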

@spaces.GPU(duration=get_gpu_duration)
@torch.inference_mode()
def generate_video(
    first_image,
    last_image,
    input_audio,
    prompt: str,
    duration: float,
    gpu_duration: float,
    enhance_prompt: bool = True,
    seed: int = 42,
    randomize_seed: bool = True,
    height: int = 1024,
    width: int = 1536,
    pose_strength: float = 0.0,
    general_strength: float = 0.0,
    motion_strength: float = 0.0,
    dreamlay_strength: float = 0.0,
    mself_strength: float = 0.0,
    dramatic_strength: float = 0.0,
    fluid_strength: float = 0.0,
    liquid_strength: float = 0.0,
    demopose_strength: float = 0.0,
    voice_strength: float = 0.0,
    realism_strength: float = 0.0,
    transition_strength: float = 0.0,
    physics_strength: float = 0.0,
    reasoning_strength: float = 0.0,
    progress=gr.Progress(track_tqdm=True),
):
    # Resolve the seed up front so the except-branch below can report it even
    # when an error occurs early.
    current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
    try:
        torch.cuda.reset_peak_memory_stats()
        log_memory("start")

        frame_rate = DEFAULT_FRAME_RATE
        num_frames = int(duration * frame_rate) + 1
        # Round up to the nearest frame count of the form 8*k + 1.
        num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1

        print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")

        images = []
        output_dir = Path("outputs")
        output_dir.mkdir(exist_ok=True)

        if first_image is not None:
            temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
            if hasattr(first_image, "save"):
                first_image.save(temp_first_path)
            else:
                temp_first_path = Path(first_image)
            images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))

        if last_image is not None:
            temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
            if hasattr(last_image, "save"):
                last_image.save(temp_last_path)
            else:
                temp_last_path = Path(last_image)
            images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))

        tiling_config = TilingConfig.default()
        video_chunks_number = get_video_chunks_number(num_frames, tiling_config)

        log_memory("before pipeline call")

        apply_prepared_lora_state_to_pipeline()

        video, audio = pipeline(
            prompt=prompt,
            seed=current_seed,
            height=int(height),
            width=int(width),
            num_frames=num_frames,
            frame_rate=frame_rate,
            images=images,
            audio_path=input_audio,
            tiling_config=tiling_config,
            enhance_prompt=enhance_prompt,
        )

        log_memory("after pipeline call")

        output_path = tempfile.mktemp(suffix=".mp4")
        encode_video(
            video=video,
            fps=frame_rate,
            audio=audio,
            output_path=output_path,
            video_chunks_number=video_chunks_number,
        )

        log_memory("after encode_video")
        return str(output_path), current_seed

    except Exception as e:
        import traceback
        log_memory("on error")
        print(f"Error: {str(e)}\n{traceback.format_exc()}")
        return None, current_seed

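# generate_video returns (video_path, seed); the click handler below routes
# the second value back into the seed slider, so the UI always shows the seed
# that was actually used, including randomized ones.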

css = """
.fillable{max-width: 1200px !important}
"""

with gr.Blocks(title="LTX-2.3 Distilled", theme=gr.themes.Citrus(), css=css) as demo:
    gr.Markdown("# LTX-2.3 F2LF: Fast Audio-Video Generation with Frame Conditioning")

    with gr.Row():
        with gr.Column():
            with gr.Row():
                first_image = gr.Image(label="First Frame (Optional)", type="pil")
                last_image = gr.Image(label="Last Frame (Optional)", type="pil")
            input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
            prompt = gr.Textbox(
                label="Prompt",
                info="For best results, make the prompt as elaborate as possible.",
                value="Make this image come alive with cinematic motion, smooth animation",
                lines=3,
                placeholder="Describe the motion and animation you want...",
            )
            duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=30.0, value=10.0, step=0.1)

            generate_btn = gr.Button("Generate Video", variant="primary", size="lg")

            with gr.Accordion("Advanced Settings", open=False):
                seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
                randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
                with gr.Row():
                    width = gr.Number(label="Width", value=1536, precision=0)
                    height = gr.Number(label="Height", value=1024, precision=0)
                with gr.Row():
                    enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
                    high_res = gr.Checkbox(label="High Resolution", value=True)
                with gr.Column():
                    gr.Markdown("### LoRA adapter strengths (set to 0 to disable; slow and WIP)")
                    pose_strength = gr.Slider(label="Anthro Enhancer strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    general_strength = gr.Slider(label="Reasoning Enhancer strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    motion_strength = gr.Slider(label="Anthro Posing Helper strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    dreamlay_strength = gr.Slider(label="Dreamlay strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    mself_strength = gr.Slider(label="Mself strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    dramatic_strength = gr.Slider(label="Dramatic strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    fluid_strength = gr.Slider(label="Fluid Helper strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    liquid_strength = gr.Slider(label="Liquid Helper strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    demopose_strength = gr.Slider(label="Audio Helper strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    voice_strength = gr.Slider(label="Voice Helper strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    realism_strength = gr.Slider(label="Anthro Realism strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    transition_strength = gr.Slider(label="POV strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    physics_strength = gr.Slider(label="Physics strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    reasoning_strength = gr.Slider(label="Distilled strength", minimum=0.0, maximum=2.0, value=0.0, step=0.01)
                    prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
                    lora_status = gr.Textbox(
                        label="LoRA Cache Status",
                        value="No LoRA state prepared yet.",
                        interactive=False,
                    )

        with gr.Column():
            output_video = gr.Video(label="Generated Video", autoplay=False)
            gpu_duration = gr.Slider(
                label="ZeroGPU duration (seconds; a 10-second Img2Vid at 1024x1024 with LoRAs takes ~70)",
                minimum=30.0,
                maximum=240.0,
                value=75.0,
                step=1.0,
            )

    gr.Examples(
        examples=[
            [
                None,
                "pinkknit.jpg",
                None,
                "The camera falls downward through darkness as if dropped into a tunnel. "
                "As it slows, five friends wearing pink knitted hats and sunglasses lean "
                "over and look down toward the camera with curious expressions. The lens "
                "has a strong fisheye effect, creating a circular frame around them. They "
                "crowd together closely, forming a symmetrical cluster while staring "
                "directly into the lens.",
                3.0,
                80.0,
                False,
                42,
                True,
                1024,
                1024,
                0.0,  # pose_strength (example)
                0.0,  # general_strength (example)
                0.0,  # motion_strength (example)
                0.0,
                0.0,
                0.0,
                0.0,
                0.0,
                0.0,
                0.0,
                0.0,
                0.0,
                0.0,
                0.0,
            ],
        ],
        inputs=[
            first_image, last_image, input_audio, prompt, duration, gpu_duration,
            enhance_prompt, seed, randomize_seed, height, width,
            pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength, physics_strength, reasoning_strength,
        ],
    )

    first_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    last_image.change(
        fn=on_image_upload,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    high_res.change(
        fn=on_highres_toggle,
        inputs=[first_image, last_image, high_res],
        outputs=[width, height],
    )

    prepare_lora_btn.click(
        fn=prepare_lora_cache,
        inputs=[pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength, physics_strength, reasoning_strength],
        outputs=[lora_status],
    )

    generate_btn.click(
        fn=generate_video,
        inputs=[
            first_image, last_image, input_audio, prompt, duration, gpu_duration, enhance_prompt,
            seed, randomize_seed, height, width,
            pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength, physics_strength, reasoning_strength,
        ],
        outputs=[output_video, seed],
    )


if __name__ == "__main__":
    demo.launch()