dagloop5 committed
Commit db543df · verified · 1 Parent(s): 8294e93

Delete app(audiowip).py

Files changed (1):
    app(audiowip).py +0 -899
app(audiowip).py DELETED
@@ -1,899 +0,0 @@
- import os
- import subprocess
- import sys
-
- # Disable torch.compile / dynamo before any torch import
- os.environ["TORCH_COMPILE_DISABLE"] = "1"
- os.environ["TORCHDYNAMO_DISABLE"] = "1"
-
- # Install xformers for memory-efficient attention
- subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
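- # check=False: startup continues even if the wheel fails to install; the
- # attention patch further down falls back gracefully in that case.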
-
- # Clone LTX-2 repo and install packages
- LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
- LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")
-
- LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video
-
- if not os.path.exists(LTX_REPO_DIR):
-     print(f"Cloning {LTX_REPO_URL}...")
-     subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
-     subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)
-
- print("Installing ltx-core and ltx-pipelines from cloned repo...")
- subprocess.run(
-     [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
-      os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
-      "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
-     check=True,
- )
-
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
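- # Belt and braces: the sys.path inserts make the cloned sources importable
- # right away, even if the editable installs above are not picked up by the
- # already-running interpreter.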
-
- import logging
- import random
- import tempfile
- from pathlib import Path
- import gc
- import hashlib
-
- import torch
- torch._dynamo.config.suppress_errors = True
- torch._dynamo.config.disable = True
-
- import spaces
- import gradio as gr
- import numpy as np
- from huggingface_hub import hf_hub_download, snapshot_download
-
- from ltx_core.components.diffusion_steps import EulerDiffusionStep
- from ltx_core.components.noisers import GaussianNoiser
- from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
- from ltx_core.model.upsampler import upsample_video
- from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
- from ltx_core.quantization import QuantizationPolicy
- from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
- from ltx_pipelines.distilled import DistilledPipeline
- from ltx_pipelines.utils import euler_denoising_loop
- from ltx_pipelines.utils.args import ImageConditioningInput
- from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
- from ltx_pipelines.utils.helpers import (
-     cleanup_memory,
-     combined_image_conditionings,
-     denoise_video_only,
-     encode_prompts,
-     simple_denoising_func,
- )
- from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
- from ltx_pipelines.utils.types import ModalitySpec
- from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
- from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP
-
- # Force-patch xformers attention into the LTX attention module.
- from ltx_core.model.transformer import attention as _attn_mod
- print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
- try:
-     from xformers.ops import memory_efficient_attention as _mea
-     _attn_mod.memory_efficient_attention = _mea
-     print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
- except Exception as e:
-     print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")
-
- logging.getLogger().setLevel(logging.INFO)
-
- MAX_SEED = np.iinfo(np.int32).max
- DEFAULT_PROMPT = (
-     "An astronaut hatches from a fragile egg on the surface of the Moon, "
-     "the shell cracking and peeling apart in gentle low-gravity motion. "
-     "Fine lunar dust lifts and drifts outward with each movement, floating "
-     "in slow arcs before settling back onto the ground."
- )
- DEFAULT_FRAME_RATE = 24.0
-
- # Resolution presets: (width, height)
- RESOLUTIONS = {
-     "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
-     "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
- }
-
-
- class LTX23DistilledA2VPipeline(DistilledPipeline):
-     """DistilledPipeline with optional audio conditioning."""
-
-     def __call__(
-         self,
-         prompt: str,
-         seed: int,
-         height: int,
-         width: int,
-         num_frames: int,
-         frame_rate: float,
-         images: list[ImageConditioningInput],
-         audio_path: str | None = None,
-         audio_mix_ratio: float = 0.35,
-         tiling_config: TilingConfig | None = None,
-         enhance_prompt: bool = False,
-     ):
-         print(prompt)
-         # Standard path when no audio input is provided.
-         if audio_path is None:
-             return super().__call__(
-                 prompt=prompt,
-                 seed=seed,
-                 height=height,
-                 width=width,
-                 num_frames=num_frames,
-                 frame_rate=frame_rate,
-                 images=images,
-                 tiling_config=tiling_config,
-                 enhance_prompt=enhance_prompt,
-             )
-
-         generator = torch.Generator(device=self.device).manual_seed(seed)
-         noiser = GaussianNoiser(generator=generator)
-         dtype = torch.bfloat16
-
-         (ctx_p,) = encode_prompts(
-             [prompt],
-             self.model_ledger,
-             enhance_first_prompt=enhance_prompt,
-             enhance_prompt_image=images[0].path if len(images) > 0 else None,
-         )
-         video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding
-
-         video_duration = num_frames / frame_rate
-         decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
-         if decoded_audio is None:
-             raise ValueError(f"Could not extract audio stream from {audio_path}")
-
-         encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
-
-         # Keep the uploaded audio as a soft prior instead of a hard target.
-         audio_mix_ratio = float(max(0.0, min(1.0, audio_mix_ratio)))
-         if audio_mix_ratio < 1.0:
-             noise = torch.randn(
-                 encoded_audio_latent.shape,
-                 device=encoded_audio_latent.device,
-                 dtype=encoded_audio_latent.dtype,
-                 generator=generator,
-             )
-             encoded_audio_latent = (
-                 audio_mix_ratio * encoded_audio_latent
-                 + (1.0 - audio_mix_ratio) * noise
-             )
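-             # Linear interpolation in latent space: a ratio of 1.0 keeps the
-             # encoded audio exactly, 0.0 replaces it with pure Gaussian noise.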
-
-         audio_shape = AudioLatentShape.from_duration(
-             batch=1,
-             duration=video_duration,
-             channels=8,
-             mel_bins=16,
-         )
-         expected_frames = audio_shape.frames
-         actual_frames = encoded_audio_latent.shape[2]
-
-         if actual_frames > expected_frames:
-             encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
-         elif actual_frames < expected_frames:
-             pad = torch.zeros(
-                 encoded_audio_latent.shape[0],
-                 encoded_audio_latent.shape[1],
-                 expected_frames - actual_frames,
-                 encoded_audio_latent.shape[3],
-                 device=encoded_audio_latent.device,
-                 dtype=encoded_audio_latent.dtype,
-             )
-             encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)
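-         # Either way, the latent now matches the frame count implied by the
-         # requested video duration along its time axis (dim 2).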
-
-         stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)
-         stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
-
-         stage_1_output_shape = VideoPixelShape(
-             batch=1,
-             frames=num_frames,
-             width=width // 2,
-             height=height // 2,
-             fps=frame_rate,
-         )
-         stage_1_conditionings = combined_image_conditionings(
-             images=images,
-             height=stage_1_output_shape.height,
-             width=stage_1_output_shape.width,
-             video_encoder=self.model_ledger.video_encoder(),
-             dtype=dtype,
-             device=self.device,
-         )
-
-         video_state, audio_state = self.stage(
-             denoiser=simple_denoising_func(
-                 video_context=video_context,
-                 audio_context=audio_context,
-                 transformer=self.model_ledger.transformer(),
-             ),
-             sigmas=stage_1_sigmas,
-             noiser=noiser,
-             width=stage_1_output_shape.width,
-             height=stage_1_output_shape.height,
-             frames=num_frames,
-             fps=frame_rate,
-             video=ModalitySpec(
-                 context=video_context,
-                 conditionings=stage_1_conditionings,
-             ),
-             audio=ModalitySpec(
-                 context=audio_context,
-                 noise_scale=stage_1_sigmas[0].item(),
-                 initial_latent=encoded_audio_latent,
-             ),
-         )
-
-         torch.cuda.synchronize()
-         cleanup_memory()
-
-         upscaled_video_latent = upsample_video(
-             latent=video_state.latent[:1],
-             video_encoder=self.model_ledger.video_encoder(),
-             upsampler=self.model_ledger.spatial_upsampler(),
-         )
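-         # Two-stage distilled cascade: stage 1 denoises at half resolution,
-         # the spatial upsampler doubles the latent, and stage 2 refines the
-         # result at the full requested size.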
-
-         stage_2_output_shape = VideoPixelShape(
-             batch=1,
-             frames=num_frames,
-             width=width,
-             height=height,
-             fps=frame_rate,
-         )
-         stage_2_conditionings = combined_image_conditionings(
-             images=images,
-             height=stage_2_output_shape.height,
-             width=stage_2_output_shape.width,
-             video_encoder=self.model_ledger.video_encoder(),
-             dtype=dtype,
-             device=self.device,
-         )
-
-         video_state, audio_state = self.stage(
-             denoiser=simple_denoising_func(
-                 video_context=video_context,
-                 audio_context=audio_context,
-                 transformer=self.model_ledger.transformer(),
-             ),
-             sigmas=stage_2_sigmas,
-             noiser=noiser,
-             width=stage_2_output_shape.width,
-             height=stage_2_output_shape.height,
-             frames=num_frames,
-             fps=frame_rate,
-             video=ModalitySpec(
-                 context=video_context,
-                 conditionings=stage_2_conditionings,
-                 noise_scale=stage_2_sigmas[0].item(),
-                 initial_latent=upscaled_video_latent,
-             ),
-             audio=ModalitySpec(
-                 context=audio_context,
-                 noise_scale=stage_2_sigmas[0].item(),
-                 initial_latent=audio_state.latent,
-             ),
-         )
-
-         torch.cuda.synchronize()
-         cleanup_memory()
-
-         decoded_video = self.model_ledger.video_decoder()(
-             video_state.latent,
-             tiling_config,
-             generator,
-         )
-         decoded_audio = self.model_ledger.audio_decoder()(audio_state.latent)
-         return decoded_video, decoded_audio
-
-
- # Model repos
- LTX_MODEL_REPO = "Lightricks/LTX-2.3"
- GEMMA_REPO = "rahul7star/gemma-3-12b-it-heretic"
-
-
- # Download model checkpoints
- print("=" * 80)
- print("Downloading LTX-2.3 distilled model + Gemma...")
- print("=" * 80)
-
- # LoRA cache directory and currently-applied key
- LORA_CACHE_DIR = Path("lora_cache")
- LORA_CACHE_DIR.mkdir(exist_ok=True)
- current_lora_key: str | None = None
-
- PENDING_LORA_KEY: str | None = None
- PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
- PENDING_LORA_STATUS: str = "No LoRA state prepared yet."
-
- weights_dir = Path("weights")
- weights_dir.mkdir(exist_ok=True)
- checkpoint_path = hf_hub_download(
-     repo_id=LTX_MODEL_REPO,
-     filename="ltx-2.3-22b-distilled.safetensors",
-     local_dir=str(weights_dir),
-     local_dir_use_symlinks=False,
- )
- spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
- gemma_root = snapshot_download(repo_id=GEMMA_REPO)
-
- # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
- # LoRA repo + download the requested LoRA adapters
- LORA_REPO = "dagloop5/LoRA"
-
- print("=" * 80)
- print("Downloading LoRA adapters from dagloop5/LoRA...")
- print("=" * 80)
- pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
- general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_Reasoning_V1.safetensors")
- motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
- dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
- mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # Hyperfap
- dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # "[He | She] is having am orgasm." (am or an?)
- fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")  # cr3ampi3 animation., missionary animation, doggystyle bouncy animation, double penetration animation
- liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")  # wet dr1pp
- demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="ltx23__demopose_d3m0p0s3.safetensors")  # d3m0p0s3
-
- print(f"Pose LoRA: {pose_lora_path}")
- print(f"General LoRA: {general_lora_path}")
- print(f"Motion LoRA: {motion_lora_path}")
- print(f"Dreamlay LoRA: {dreamlay_lora_path}")
- print(f"Mself LoRA: {mself_lora_path}")
- print(f"Dramatic LoRA: {dramatic_lora_path}")
- print(f"Fluid LoRA: {fluid_lora_path}")
- print(f"Liquid LoRA: {liquid_lora_path}")
- print(f"Demopose LoRA: {demopose_lora_path}")
- # ----------------------------------------------------------------
-
- print(f"Checkpoint: {checkpoint_path}")
- print(f"Spatial upsampler: {spatial_upsampler_path}")
- print(f"Gemma root: {gemma_root}")
-
- # Initialize pipeline WITH text encoder and optional audio support
- # ---- Replace block (pipeline init) lines 275-281 ----
- pipeline = LTX23DistilledA2VPipeline(
-     distilled_checkpoint_path=checkpoint_path,
-     spatial_upsampler_path=spatial_upsampler_path,
-     gemma_root=gemma_root,
-     loras=[],
-     quantization=QuantizationPolicy.fp8_cast(),  # keep FP8 quantization unchanged
- )
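- # loras=[] is deliberate: adapters are fused in later via the prepare/apply
- # cache flow below rather than baked in at construction time.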
- # ----------------------------------------------------------------
-
- def _make_lora_key(
-     pose_strength: float,
-     general_strength: float,
-     motion_strength: float,
-     dreamlay_strength: float,
-     mself_strength: float,
-     dramatic_strength: float,
-     fluid_strength: float,
-     liquid_strength: float,
-     demopose_strength: float,
- ) -> tuple[str, str]:
-     rp = round(float(pose_strength), 2)
-     rg = round(float(general_strength), 2)
-     rm = round(float(motion_strength), 2)
-     rd = round(float(dreamlay_strength), 2)
-     rs = round(float(mself_strength), 2)
-     rr = round(float(dramatic_strength), 2)
-     rf = round(float(fluid_strength), 2)
-     rl = round(float(liquid_strength), 2)
-     ro = round(float(demopose_strength), 2)
-     key_str = (
-         f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|"
-         f"{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|"
-         f"{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}"
-     )
-     key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
-     return key, key_str
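- # The cache key is a SHA-256 over adapter paths and rounded strengths, so each
- # distinct strength combination maps to exactly one fused-weights file on disk.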
-
-
- def prepare_lora_cache(
-     pose_strength: float,
-     general_strength: float,
-     motion_strength: float,
-     dreamlay_strength: float,
-     mself_strength: float,
-     dramatic_strength: float,
-     fluid_strength: float,
-     liquid_strength: float,
-     demopose_strength: float,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     """
-     CPU-only step:
-       - checks cache
-       - loads cached fused transformer state_dict, or
-       - builds fused transformer on CPU and saves it
-     The resulting state_dict is stored in memory and can be applied later.
-     """
-     global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS
-
-     ledger = pipeline.model_ledger
-     key, _ = _make_lora_key(
-         pose_strength, general_strength, motion_strength, dreamlay_strength,
-         mself_strength, dramatic_strength, fluid_strength, liquid_strength,
-         demopose_strength,
-     )
-     cache_path = LORA_CACHE_DIR / f"{key}.pt"
-
-     progress(0.05, desc="Preparing LoRA state")
-     if cache_path.exists():
-         try:
-             progress(0.20, desc="Loading cached fused state")
-             state = torch.load(cache_path, map_location="cpu")
-             PENDING_LORA_KEY = key
-             PENDING_LORA_STATE = state
-             PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
-             return PENDING_LORA_STATUS
-         except Exception as e:
-             print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")
-
-     entries = [
-         (pose_lora_path, round(float(pose_strength), 2)),
-         (general_lora_path, round(float(general_strength), 2)),
-         (motion_lora_path, round(float(motion_strength), 2)),
-         (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
-         (mself_lora_path, round(float(mself_strength), 2)),
-         (dramatic_lora_path, round(float(dramatic_strength), 2)),
-         (fluid_lora_path, round(float(fluid_strength), 2)),
-         (liquid_lora_path, round(float(liquid_strength), 2)),
-         (demopose_lora_path, round(float(demopose_strength), 2)),
-     ]
-     loras_for_builder = [
-         LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
-         for path, strength in entries
-         if path is not None and float(strength) != 0.0
-     ]
-
-     if not loras_for_builder:
-         PENDING_LORA_KEY = None
-         PENDING_LORA_STATE = None
-         PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
-         return PENDING_LORA_STATUS
-
-     tmp_ledger = None
-     new_transformer_cpu = None
-     try:
-         progress(0.35, desc="Building fused CPU transformer")
-         tmp_ledger = pipeline.model_ledger.__class__(
-             dtype=ledger.dtype,
-             device=torch.device("cpu"),
-             checkpoint_path=str(checkpoint_path),
-             spatial_upsampler_path=str(spatial_upsampler_path),
-             gemma_root_path=str(gemma_root),
-             loras=tuple(loras_for_builder),
-             quantization=getattr(ledger, "quantization", None),
-         )
-         new_transformer_cpu = tmp_ledger.transformer()
-
-         progress(0.70, desc="Extracting fused state_dict")
-         state = new_transformer_cpu.state_dict()
-         torch.save(state, cache_path)
-
-         PENDING_LORA_KEY = key
-         PENDING_LORA_STATE = state
-         PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
-         return PENDING_LORA_STATUS
-
-     except Exception as e:
-         import traceback
-         print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
-         print(traceback.format_exc())
-         PENDING_LORA_KEY = None
-         PENDING_LORA_STATE = None
-         PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
-         return PENDING_LORA_STATUS
-
-     finally:
-         try:
-             del new_transformer_cpu
-         except Exception:
-             pass
-         try:
-             del tmp_ledger
-         except Exception:
-             pass
-         gc.collect()
-
-
- def apply_prepared_lora_state_to_pipeline():
-     """
-     Fast step: copy the already prepared CPU state into the live transformer.
-     This is the only part that should remain near generation time.
-     """
-     global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE
-
-     if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
-         print("[LoRA] No prepared LoRA state available; skipping.")
-         return False
-
-     if current_lora_key == PENDING_LORA_KEY:
-         print("[LoRA] Prepared LoRA state already active; skipping.")
-         return True
-
-     existing_transformer = _transformer
-     existing_params = {name: param for name, param in existing_transformer.named_parameters()}
-     existing_buffers = {name: buf for name, buf in existing_transformer.named_buffers()}
-
-     with torch.no_grad():
-         for k, v in PENDING_LORA_STATE.items():
-             if k in existing_params:
-                 existing_params[k].data.copy_(v.to(existing_params[k].device))
-             elif k in existing_buffers:
-                 existing_buffers[k].data.copy_(v.to(existing_buffers[k].device))
-
-     current_lora_key = PENDING_LORA_KEY
-     print("[LoRA] Prepared LoRA state applied to the pipeline.")
-     return True
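- # Copying in place with copy_() preserves the existing GPU allocations (and
- # ZeroGPU tensor packing) instead of swapping in a whole new transformer.
- # _transformer is the module-level instance created by the preload block below.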
-
- # ---- REPLACE PRELOAD BLOCK START ----
- # Preload all models for ZeroGPU tensor packing.
- print("Preloading all models (including Gemma and audio components)...")
- ledger = pipeline.model_ledger
-
- # Save the original factory methods so we can rebuild individual components later.
- # These are bound callables on ledger that will call the builder when invoked.
- _orig_transformer_factory = ledger.transformer
- _orig_video_encoder_factory = ledger.video_encoder
- _orig_video_decoder_factory = ledger.video_decoder
- _orig_audio_encoder_factory = ledger.audio_encoder
- _orig_audio_decoder_factory = ledger.audio_decoder
- _orig_vocoder_factory = ledger.vocoder
- _orig_spatial_upsampler_factory = ledger.spatial_upsampler
- _orig_text_encoder_factory = ledger.text_encoder
- _orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor
-
- # Call the original factories once to create the cached instances we will serve by default.
- _transformer = _orig_transformer_factory()
- _video_encoder = _orig_video_encoder_factory()
- _video_decoder = _orig_video_decoder_factory()
- _audio_encoder = _orig_audio_encoder_factory()
- _audio_decoder = _orig_audio_decoder_factory()
- _vocoder = _orig_vocoder_factory()
- _spatial_upsampler = _orig_spatial_upsampler_factory()
- _text_encoder = _orig_text_encoder_factory()
- _embeddings_processor = _orig_gemma_embeddings_factory()
-
- # Replace ledger methods with lightweight lambdas that return the cached instances.
- # We keep the original factories above so we can call them later to rebuild components.
- ledger.transformer = lambda: _transformer
- ledger.video_encoder = lambda: _video_encoder
- ledger.video_decoder = lambda: _video_decoder
- ledger.audio_encoder = lambda: _audio_encoder
- ledger.audio_decoder = lambda: _audio_decoder
- ledger.vocoder = lambda: _vocoder
- ledger.spatial_upsampler = lambda: _spatial_upsampler
- ledger.text_encoder = lambda: _text_encoder
- ledger.gemma_embeddings_processor = lambda: _embeddings_processor
-
- print("All models preloaded (including Gemma text encoder and audio encoder)!")
- # ---- REPLACE PRELOAD BLOCK END ----
-
- print("=" * 80)
- print("Pipeline ready!")
- print("=" * 80)
-
-
- def log_memory(tag: str):
-     if torch.cuda.is_available():
-         allocated = torch.cuda.memory_allocated() / 1024**3
-         peak = torch.cuda.max_memory_allocated() / 1024**3
-         free, total = torch.cuda.mem_get_info()
-         print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
-
-
- def detect_aspect_ratio(image) -> str:
-     if image is None:
-         return "16:9"
-     if hasattr(image, "size"):
-         w, h = image.size
-     elif hasattr(image, "shape"):
-         h, w = image.shape[:2]
-     else:
-         return "16:9"
-     ratio = w / h
-     candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
-     return min(candidates, key=lambda k: abs(ratio - candidates[k]))
-
-
- def on_image_upload(first_image, last_image, high_res):
-     ref_image = first_image if first_image is not None else last_image
-     aspect = detect_aspect_ratio(ref_image)
-     tier = "high" if high_res else "low"
-     w, h = RESOLUTIONS[tier][aspect]
-     return gr.update(value=w), gr.update(value=h)
-
-
- def on_highres_toggle(first_image, last_image, high_res):
-     ref_image = first_image if first_image is not None else last_image
-     aspect = detect_aspect_ratio(ref_image)
-     tier = "high" if high_res else "low"
-     w, h = RESOLUTIONS[tier][aspect]
-     return gr.update(value=w), gr.update(value=h)
-
-
- def get_gpu_duration(
-     first_image,
-     last_image,
-     input_audio,
-     audio_mix_ratio,
-     prompt: str,
-     duration: float,
-     gpu_duration: float,
-     enhance_prompt: bool = True,
-     seed: int = 42,
-     randomize_seed: bool = True,
-     height: int = 1024,
-     width: int = 1536,
-     pose_strength: float = 0.0,
-     general_strength: float = 0.0,
-     motion_strength: float = 0.0,
-     dreamlay_strength: float = 0.0,
-     mself_strength: float = 0.0,
-     dramatic_strength: float = 0.0,
-     fluid_strength: float = 0.0,
-     liquid_strength: float = 0.0,
-     demopose_strength: float = 0.0,
-     progress=None,
- ):
-     return int(gpu_duration)
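- # spaces.GPU accepts a callable for `duration`; it is called with the same
- # arguments as the decorated function, which is why this signature mirrors
- # generate_video even though only gpu_duration is used.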
-
- @spaces.GPU(duration=get_gpu_duration)
- @torch.inference_mode()
- def generate_video(
-     first_image,
-     last_image,
-     input_audio,
-     audio_mix_ratio,
-     prompt: str,
-     duration: float,
-     gpu_duration: float,
-     enhance_prompt: bool = True,
-     seed: int = 42,
-     randomize_seed: bool = True,
-     height: int = 1024,
-     width: int = 1536,
-     pose_strength: float = 0.0,
-     general_strength: float = 0.0,
-     motion_strength: float = 0.0,
-     dreamlay_strength: float = 0.0,
-     mself_strength: float = 0.0,
-     dramatic_strength: float = 0.0,
-     fluid_strength: float = 0.0,
-     liquid_strength: float = 0.0,
-     demopose_strength: float = 0.0,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     # Pick the seed before entering the try block so the except handler can
-     # always return it.
-     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
-     try:
-         torch.cuda.reset_peak_memory_stats()
-         log_memory("start")
-
-         frame_rate = DEFAULT_FRAME_RATE
-         num_frames = int(duration * frame_rate) + 1
-         num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
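-         # Round up to the next 8n + 1 frames, the frame-count form the model's
-         # temporally compressed video latent expects.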
-
-         print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")
-
-         images = []
-         output_dir = Path("outputs")
-         output_dir.mkdir(exist_ok=True)
-
-         if first_image is not None:
-             temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
-             if hasattr(first_image, "save"):
-                 first_image.save(temp_first_path)
-             else:
-                 temp_first_path = Path(first_image)
-             images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))
-
-         if last_image is not None:
-             temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
-             if hasattr(last_image, "save"):
-                 last_image.save(temp_last_path)
-             else:
-                 temp_last_path = Path(last_image)
-             images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))
-
-         tiling_config = TilingConfig.default()
-         video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
-
-         log_memory("before pipeline call")
-
-         apply_prepared_lora_state_to_pipeline()
-
-         video, audio = pipeline(
-             prompt=prompt,
-             seed=current_seed,
-             height=int(height),
-             width=int(width),
-             num_frames=num_frames,
-             frame_rate=frame_rate,
-             images=images,
-             audio_path=input_audio,
-             audio_mix_ratio=audio_mix_ratio,
-             tiling_config=tiling_config,
-             enhance_prompt=enhance_prompt,
-         )
-
-         log_memory("after pipeline call")
-
-         output_path = tempfile.mktemp(suffix=".mp4")
-         encode_video(
-             video=video,
-             fps=frame_rate,
-             audio=audio,
-             output_path=output_path,
-             video_chunks_number=video_chunks_number,
-         )
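-         # video_chunks_number mirrors the tiled VAE decode configuration above
-         # so the writer can consume the video chunk by chunk.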
-
-         log_memory("after encode_video")
-         return str(output_path), current_seed
-
-     except Exception as e:
-         import traceback
-         log_memory("on error")
-         print(f"Error: {str(e)}\n{traceback.format_exc()}")
-         return None, current_seed
-
-
- css = """
- .fillable{max-width: 1200px !important}
- """
-
- with gr.Blocks(title="LTX-2.3 Distilled", theme=gr.themes.Citrus(), css=css) as demo:
-     gr.Markdown("# LTX-2.3 F2LF: Fast Audio-Video Generation with Frame Conditioning")
729
-
730
-
731
- with gr.Row():
732
- with gr.Column():
733
- with gr.Row():
734
- first_image = gr.Image(label="First Frame (Optional)", type="pil")
735
- last_image = gr.Image(label="Last Frame (Optional)", type="pil")
736
- input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
737
- audio_mix_ratio = gr.Slider(
738
- label="Audio Conditioning Strength",
739
- minimum=0.0,
740
- maximum=1.0,
741
- value=0.35,
742
- step=0.01,
743
- info="0 = mostly ignore input audio, 1 = strongly follow input audio",
744
- )
745
- prompt = gr.Textbox(
746
- label="Prompt",
747
- info="for best results - make it as elaborate as possible",
748
- value="Make this image come alive with cinematic motion, smooth animation",
749
- lines=3,
750
- placeholder="Describe the motion and animation you want...",
751
- )
752
- duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=30.0, value=10.0, step=0.1)
753
-
754
-
755
- generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
756
-
757
- with gr.Accordion("Advanced Settings", open=False):
758
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
759
- randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
760
- with gr.Row():
761
- width = gr.Number(label="Width", value=1536, precision=0)
762
- height = gr.Number(label="Height", value=1024, precision=0)
763
- with gr.Row():
764
- enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
765
- high_res = gr.Checkbox(label="High Resolution", value=True)
766
- with gr.Column():
767
- gr.Markdown("### LoRA adapter strengths (set to 0 to disable)")
768
- pose_strength = gr.Slider(
769
- label="Anthro Enhancer strength",
770
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
771
- )
772
- general_strength = gr.Slider(
773
- label="Reasoning Enhancer strength",
774
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
775
- )
776
- motion_strength = gr.Slider(
777
- label="Anthro Posing Helper strength",
778
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
779
- )
780
- dreamlay_strength = gr.Slider(
781
- label="Dreamlay strength",
782
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
783
- )
784
- mself_strength = gr.Slider(
785
- label="Mself strength",
786
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
787
- )
788
- dramatic_strength = gr.Slider(
789
- label="Dramatic strength",
790
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
791
- )
792
- fluid_strength = gr.Slider(
793
- label="Fluid Helper strength",
794
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
795
- )
796
- liquid_strength = gr.Slider(
797
- label="Liquid Helper strength",
798
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
799
- )
800
- demopose_strength = gr.Slider(
801
- label="Demopose Helper strength",
802
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
803
- )
804
- prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
805
- lora_status = gr.Textbox(
806
- label="LoRA Cache Status",
807
- value="No LoRA state prepared yet.",
808
- interactive=False,
809
- )
810
-
811
- with gr.Column():
812
- output_video = gr.Video(label="Generated Video", autoplay=False)
813
- gpu_duration = gr.Slider(
814
- label="ZeroGPU duration (seconds)",
815
- minimum=40.0,
816
- maximum=240.0,
817
- value=85.0,
818
- step=1.0,
819
- )
-
-     gr.Examples(
-         examples=[
-             [
-                 None,
-                 "pinkknit.jpg",
-                 None,
-                 0.0,
-                 "The camera falls downward through darkness as if dropped into a tunnel. "
-                 "As it slows, five friends wearing pink knitted hats and sunglasses lean "
-                 "over and look down toward the camera with curious expressions. The lens "
-                 "has a strong fisheye effect, creating a circular frame around them. They "
-                 "crowd together closely, forming a symmetrical cluster while staring "
-                 "directly into the lens.",
-                 3.0,
-                 80.0,
-                 False,
-                 42,
-                 True,
-                 1024,
-                 1024,
-                 0.0,  # pose_strength (example)
-                 0.0,  # general_strength (example)
-                 0.0,  # motion_strength (example)
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-             ],
-         ],
-         inputs=[
-             first_image, last_image, input_audio, audio_mix_ratio, prompt, duration, gpu_duration,
-             enhance_prompt, seed, randomize_seed, height, width,
-             pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength,
-         ],
-     )
-
-     first_image.change(
-         fn=on_image_upload,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     last_image.change(
-         fn=on_image_upload,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     high_res.change(
-         fn=on_highres_toggle,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     prepare_lora_btn.click(
-         fn=prepare_lora_cache,
-         inputs=[pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength],
-         outputs=[lora_status],
-     )
-
-     generate_btn.click(
-         fn=generate_video,
-         inputs=[
-             first_image, last_image, input_audio, audio_mix_ratio, prompt, duration, gpu_duration, enhance_prompt,
-             seed, randomize_seed, height, width,
-             pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength,
-         ],
-         outputs=[output_video, seed],
-     )
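-     # generate_video returns the seed it actually used, so the seed slider is
-     # updated to reflect randomized seeds.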
-
-
- if __name__ == "__main__":
-     demo.launch()