dagloop5 committed
Commit edce51a · verified · 1 parent(s): 2622486

Delete app(currentwith12).py

Files changed (1):
  app(currentwith12).py +0 -893
app(currentwith12).py DELETED
@@ -1,893 +0,0 @@
- import os
- import subprocess
- import sys
-
- # Disable torch.compile / dynamo before any torch import
- os.environ["TORCH_COMPILE_DISABLE"] = "1"
- os.environ["TORCHDYNAMO_DISABLE"] = "1"
-
- # Install xformers for memory-efficient attention
- subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
-
- # Clone LTX-2 repo and install packages
- LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
- LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")
-
- LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video
-
- if not os.path.exists(LTX_REPO_DIR):
-     print(f"Cloning {LTX_REPO_URL}...")
-     subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
-     subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)
-
- print("Installing ltx-core and ltx-pipelines from cloned repo...")
- subprocess.run(
-     [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
-      os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
-      "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
-     check=True,
- )
-
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
-
- import logging
- import random
- import tempfile
- from pathlib import Path
- import gc
- import hashlib
-
- import torch
- torch._dynamo.config.suppress_errors = True
- torch._dynamo.config.disable = True
-
- import spaces
- import gradio as gr
- import numpy as np
- from huggingface_hub import hf_hub_download, snapshot_download
- from safetensors.torch import load_file, save_file
-
- from ltx_core.components.diffusion_steps import EulerDiffusionStep
- from ltx_core.components.noisers import GaussianNoiser
- from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
- from ltx_core.model.upsampler import upsample_video
- from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
- from ltx_core.quantization import QuantizationPolicy
- from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
- from ltx_pipelines.distilled import DistilledPipeline
- from ltx_pipelines.utils import euler_denoising_loop
- from ltx_pipelines.utils.args import ImageConditioningInput
- from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
- from ltx_pipelines.utils.helpers import (
-     cleanup_memory,
-     combined_image_conditionings,
-     denoise_video_only,
-     encode_prompts,
-     simple_denoising_func,
- )
- from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
- from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
- from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP
-
- # Force-patch xformers attention into the LTX attention module.
- from ltx_core.model.transformer import attention as _attn_mod
- print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
- try:
-     from xformers.ops import memory_efficient_attention as _mea
-     _attn_mod.memory_efficient_attention = _mea
-     print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
- except Exception as e:
-     print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")
-
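- # Editorial note: a minimal smoke test for the patch above, assuming a CUDA
- # device. xformers' memory_efficient_attention takes (batch, seq, heads,
- # head_dim) inputs, unlike torch's scaled_dot_product_attention, which expects
- # (batch, heads, seq, head_dim). Kept commented out so import has no side effects:
- #
- # if torch.cuda.is_available():
- #     _q = torch.randn(1, 16, 8, 64, device="cuda", dtype=torch.bfloat16)
- #     _out = _attn_mod.memory_efficient_attention(_q, _q, _q)
- #     assert _out.shape == _q.shape
-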
- logging.getLogger().setLevel(logging.INFO)
-
- MAX_SEED = np.iinfo(np.int32).max
- DEFAULT_PROMPT = (
-     "An astronaut hatches from a fragile egg on the surface of the Moon, "
-     "the shell cracking and peeling apart in gentle low-gravity motion. "
-     "Fine lunar dust lifts and drifts outward with each movement, floating "
-     "in slow arcs before settling back onto the ground."
- )
- DEFAULT_FRAME_RATE = 24.0
-
- # Resolution presets: (width, height)
- RESOLUTIONS = {
-     "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024), "9:7": (1408, 1088), "7:9": (1088, 1408), "19:13": (1472, 1008), "13:19": (1008, 1472)},
-     "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768), "9:7": (704, 544), "7:9": (544, 704), "19:13": (736, 504), "13:19": (504, 736)},
- }
-
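- # Editorial note: each "low" preset is exactly half of its "high" counterpart
- # in both dimensions, except 1:1 (1024 -> 768). Independently of the tier,
- # stage 1 of the distilled pipeline below denoises at width // 2 and
- # height // 2 before the x2 spatial upsampler restores the requested size.
-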
- class LTX23DistilledA2VPipeline(DistilledPipeline):
-     """DistilledPipeline with optional audio conditioning."""
-
-     def __call__(
-         self,
-         prompt: str,
-         seed: int,
-         height: int,
-         width: int,
-         num_frames: int,
-         frame_rate: float,
-         images: list[ImageConditioningInput],
-         audio_path: str | None = None,
-         tiling_config: TilingConfig | None = None,
-         enhance_prompt: bool = False,
-     ):
-         # Standard path when no audio input is provided.
-         print(prompt)
-         if audio_path is None:
-             return super().__call__(
-                 prompt=prompt,
-                 seed=seed,
-                 height=height,
-                 width=width,
-                 num_frames=num_frames,
-                 frame_rate=frame_rate,
-                 images=images,
-                 tiling_config=tiling_config,
-                 enhance_prompt=enhance_prompt,
-             )
-
-         generator = torch.Generator(device=self.device).manual_seed(seed)
-         noiser = GaussianNoiser(generator=generator)
-         stepper = EulerDiffusionStep()
-         dtype = torch.bfloat16
-
-         (ctx_p,) = encode_prompts(
-             [prompt],
-             self.model_ledger,
-             enhance_first_prompt=enhance_prompt,
-             enhance_prompt_image=images[0].path if len(images) > 0 else None,
-         )
-         video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding
-
-         video_duration = num_frames / frame_rate
-         decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
-         if decoded_audio is None:
-             raise ValueError(f"Could not extract audio stream from {audio_path}")
-
-         encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
-         audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
-         expected_frames = audio_shape.frames
-         actual_frames = encoded_audio_latent.shape[2]
-
-         # Trim or zero-pad the audio latent to the frame count implied by the video duration.
-         if actual_frames > expected_frames:
-             encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
-         elif actual_frames < expected_frames:
-             pad = torch.zeros(
-                 encoded_audio_latent.shape[0],
-                 encoded_audio_latent.shape[1],
-                 expected_frames - actual_frames,
-                 encoded_audio_latent.shape[3],
-                 device=encoded_audio_latent.device,
-                 dtype=encoded_audio_latent.dtype,
-             )
-             encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)
-
-         video_encoder = self.model_ledger.video_encoder()
-         transformer = self.model_ledger.transformer()
-         stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)
-
-         def denoising_loop(sigmas, video_state, audio_state, stepper):
-             return euler_denoising_loop(
-                 sigmas=sigmas,
-                 video_state=video_state,
-                 audio_state=audio_state,
-                 stepper=stepper,
-                 denoise_fn=simple_denoising_func(
-                     video_context=video_context,
-                     audio_context=audio_context,
-                     transformer=transformer,
-                 ),
-             )
-
-         # Stage 1 denoises at half resolution; stage 2 refines at full size
-         # after 2x spatial upsampling of the stage-1 latent.
-         stage_1_output_shape = VideoPixelShape(
-             batch=1,
-             frames=num_frames,
-             width=width // 2,
-             height=height // 2,
-             fps=frame_rate,
-         )
-         stage_1_conditionings = combined_image_conditionings(
-             images=images,
-             height=stage_1_output_shape.height,
-             width=stage_1_output_shape.width,
-             video_encoder=video_encoder,
-             dtype=dtype,
-             device=self.device,
-         )
-         video_state = denoise_video_only(
-             output_shape=stage_1_output_shape,
-             conditionings=stage_1_conditionings,
-             noiser=noiser,
-             sigmas=stage_1_sigmas,
-             stepper=stepper,
-             denoising_loop_fn=denoising_loop,
-             components=self.pipeline_components,
-             dtype=dtype,
-             device=self.device,
-             initial_audio_latent=encoded_audio_latent,
-         )
-
-         torch.cuda.synchronize()
-         cleanup_memory()
-
-         upscaled_video_latent = upsample_video(
-             latent=video_state.latent[:1],
-             video_encoder=video_encoder,
-             upsampler=self.model_ledger.spatial_upsampler(),
-         )
-         stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
-         stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
-         stage_2_conditionings = combined_image_conditionings(
-             images=images,
-             height=stage_2_output_shape.height,
-             width=stage_2_output_shape.width,
-             video_encoder=video_encoder,
-             dtype=dtype,
-             device=self.device,
-         )
-         video_state = denoise_video_only(
-             output_shape=stage_2_output_shape,
-             conditionings=stage_2_conditionings,
-             noiser=noiser,
-             sigmas=stage_2_sigmas,
-             stepper=stepper,
-             denoising_loop_fn=denoising_loop,
-             components=self.pipeline_components,
-             dtype=dtype,
-             device=self.device,
-             noise_scale=stage_2_sigmas[0],
-             initial_video_latent=upscaled_video_latent,
-             initial_audio_latent=encoded_audio_latent,
-         )
-
-         torch.cuda.synchronize()
-         del transformer
-         del video_encoder
-         cleanup_memory()
-
-         decoded_video = vae_decode_video(
-             video_state.latent,
-             self.model_ledger.video_decoder(),
-             tiling_config,
-             generator,
-         )
-         original_audio = Audio(
-             waveform=decoded_audio.waveform.squeeze(0),
-             sampling_rate=decoded_audio.sampling_rate,
-         )
-         return decoded_video, original_audio
-
-
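- # Illustrative call (editorial sketch; mirrors how generate_video() invokes the
- # pipeline further down, with hypothetical asset paths):
- #
- # video, audio = pipeline(
- #     prompt="A slow dolly shot...", seed=42, height=1024, width=1536,
- #     num_frames=241, frame_rate=24.0,
- #     images=[ImageConditioningInput(path="first.jpg", frame_idx=0, strength=1.0)],
- #     audio_path="speech.wav", tiling_config=TilingConfig.default(),
- # )
-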
- # Model repos
- LTX_MODEL_REPO = "Lightricks/LTX-2.3"
- GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"
-
-
- # Download model checkpoints
- print("=" * 80)
- print("Downloading LTX-2.3 distilled model + Gemma...")
- print("=" * 80)
-
- # LoRA cache directory and currently-applied key
- LORA_CACHE_DIR = Path("lora_cache")
- LORA_CACHE_DIR.mkdir(exist_ok=True)
- current_lora_key: str | None = None
-
- PENDING_LORA_KEY: str | None = None
- PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
- PENDING_LORA_STATUS: str = "No LoRA state prepared yet."
-
- weights_dir = Path("weights")
- weights_dir.mkdir(exist_ok=True)
- checkpoint_path = hf_hub_download(
-     repo_id=LTX_MODEL_REPO,
-     filename="ltx-2.3-22b-distilled.safetensors",
-     local_dir=str(weights_dir),
-     local_dir_use_symlinks=False,
- )
- spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
- gemma_root = snapshot_download(repo_id=GEMMA_REPO)
-
- # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
- # LoRA repo + download the requested LoRA adapters
- LORA_REPO = "dagloop5/LoRA"
-
- print("=" * 80)
- print("Downloading LoRA adapters from dagloop5/LoRA...")
- print("=" * 80)
- pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
- general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_VBVR_Reasoning_I2V_V2.safetensors")
- motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
- dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
- mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # Hyperfap
- dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # "[He | She] is having am orgasm." (am or an?)
- fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")  # cr3ampi3 animation., missionary animation, doggystyle bouncy animation, double penetration animation
- liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")  # wet dr1pp
- demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
- voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
- realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
- transition_lora_path = hf_hub_download(repo_id="valiantcat/LTX-2.3-Transition-LORA", filename="ltx2.3-transition.safetensors")
-
- print(f"Pose LoRA: {pose_lora_path}")
- print(f"General LoRA: {general_lora_path}")
- print(f"Motion LoRA: {motion_lora_path}")
- print(f"Dreamlay LoRA: {dreamlay_lora_path}")
- print(f"Mself LoRA: {mself_lora_path}")
- print(f"Dramatic LoRA: {dramatic_lora_path}")
- print(f"Fluid LoRA: {fluid_lora_path}")
- print(f"Liquid LoRA: {liquid_lora_path}")
- print(f"Demopose LoRA: {demopose_lora_path}")
- print(f"Voice LoRA: {voice_lora_path}")
- print(f"Realism LoRA: {realism_lora_path}")
- print(f"Transition LoRA: {transition_lora_path}")
- # ----------------------------------------------------------------
-
- print(f"Checkpoint: {checkpoint_path}")
- print(f"Spatial upsampler: {spatial_upsampler_path}")
- print(f"Gemma root: {gemma_root}")
-
- # Initialize pipeline WITH text encoder and optional audio support
- # ---- Replace block (pipeline init) lines 275-281 ----
- pipeline = LTX23DistilledA2VPipeline(
-     distilled_checkpoint_path=checkpoint_path,
-     spatial_upsampler_path=spatial_upsampler_path,
-     gemma_root=gemma_root,
-     loras=[],
-     quantization=QuantizationPolicy.fp8_cast(),  # keep FP8 quantization unchanged
- )
- # ----------------------------------------------------------------
-
- def _make_lora_key(pose_strength: float, general_strength: float, motion_strength: float, dreamlay_strength: float, mself_strength: float, dramatic_strength: float, fluid_strength: float, liquid_strength: float, demopose_strength: float, voice_strength: float, realism_strength: float, transition_strength: float) -> tuple[str, str]:
-     rp = round(float(pose_strength), 2)
-     rg = round(float(general_strength), 2)
-     rm = round(float(motion_strength), 2)
-     rd = round(float(dreamlay_strength), 2)
-     rs = round(float(mself_strength), 2)
-     rr = round(float(dramatic_strength), 2)
-     rf = round(float(fluid_strength), 2)
-     rl = round(float(liquid_strength), 2)
-     ro = round(float(demopose_strength), 2)
-     rv = round(float(voice_strength), 2)
-     re = round(float(realism_strength), 2)
-     rt = round(float(transition_strength), 2)
-     key_str = f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}|{dreamlay_lora_path}:{rd}|{mself_lora_path}:{rs}|{dramatic_lora_path}:{rr}|{fluid_lora_path}:{rf}|{liquid_lora_path}:{rl}|{demopose_lora_path}:{ro}|{voice_lora_path}:{rv}|{realism_lora_path}:{re}|{transition_lora_path}:{rt}"
-     key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
-     return key, key_str
-
-
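- # Property of the key above (editorial note): strengths are rounded to two
- # decimals before hashing, so 0.8 and 0.8001 share one cache file, while 0.80
- # vs 0.81 hash to different SHA-256 keys and thus separate fused checkpoints.
-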
- def prepare_lora_cache(
-     pose_strength: float,
-     general_strength: float,
-     motion_strength: float,
-     dreamlay_strength: float,
-     mself_strength: float,
-     dramatic_strength: float,
-     fluid_strength: float,
-     liquid_strength: float,
-     demopose_strength: float,
-     voice_strength: float,
-     realism_strength: float,
-     transition_strength: float,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     """
-     CPU-only step:
-     - checks the cache,
-     - loads a cached fused transformer state_dict, or
-     - builds the fused transformer on CPU and saves it.
-     The resulting state_dict is stored in memory and can be applied later.
-     """
-     global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS
-
-     ledger = pipeline.model_ledger
-     key, _ = _make_lora_key(pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength)
-     cache_path = LORA_CACHE_DIR / f"{key}.safetensors"
-
-     progress(0.05, desc="Preparing LoRA state")
-     if cache_path.exists():
-         try:
-             progress(0.20, desc="Loading cached fused state")
-             state = load_file(str(cache_path))
-             PENDING_LORA_KEY = key
-             PENDING_LORA_STATE = state
-             PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
-             return PENDING_LORA_STATUS
-         except Exception as e:
-             print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")
-
-     entries = [
-         (pose_lora_path, round(float(pose_strength), 2)),
-         (general_lora_path, round(float(general_strength), 2)),
-         (motion_lora_path, round(float(motion_strength), 2)),
-         (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
-         (mself_lora_path, round(float(mself_strength), 2)),
-         (dramatic_lora_path, round(float(dramatic_strength), 2)),
-         (fluid_lora_path, round(float(fluid_strength), 2)),
-         (liquid_lora_path, round(float(liquid_strength), 2)),
-         (demopose_lora_path, round(float(demopose_strength), 2)),
-         (voice_lora_path, round(float(voice_strength), 2)),
-         (realism_lora_path, round(float(realism_strength), 2)),
-         (transition_lora_path, round(float(transition_strength), 2)),
-     ]
-     loras_for_builder = [
-         LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
-         for path, strength in entries
-         if path is not None and float(strength) != 0.0
-     ]
-
-     if not loras_for_builder:
-         PENDING_LORA_KEY = None
-         PENDING_LORA_STATE = None
-         PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
-         return PENDING_LORA_STATUS
-
-     tmp_ledger = None
-     new_transformer_cpu = None
-     try:
-         progress(0.35, desc="Building fused CPU transformer")
-         tmp_ledger = pipeline.model_ledger.__class__(
-             dtype=ledger.dtype,
-             device=torch.device("cpu"),
-             checkpoint_path=str(checkpoint_path),
-             spatial_upsampler_path=str(spatial_upsampler_path),
-             gemma_root_path=str(gemma_root),
-             loras=tuple(loras_for_builder),
-             quantization=getattr(ledger, "quantization", None),
-         )
-         new_transformer_cpu = tmp_ledger.transformer()
-
-         progress(0.70, desc="Extracting fused state_dict")
-         state = {
-             k: v.detach().cpu().contiguous()
-             for k, v in new_transformer_cpu.state_dict().items()
-         }
-         save_file(state, str(cache_path))
-
-         PENDING_LORA_KEY = key
-         PENDING_LORA_STATE = state
-         PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
-         return PENDING_LORA_STATUS
-
-     except Exception as e:
-         import traceback
-         print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
-         print(traceback.format_exc())
-         PENDING_LORA_KEY = None
-         PENDING_LORA_STATE = None
-         PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
-         return PENDING_LORA_STATUS
-
-     finally:
-         try:
-             del new_transformer_cpu
-         except Exception:
-             pass
-         try:
-             del tmp_ledger
-         except Exception:
-             pass
-         gc.collect()
-
-
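- # Editorial note: building the fused transformer on CPU and caching the merged
- # state_dict keeps the expensive LoRA merge off the GPU-metered path; repeat
- # runs with the same strengths only pay for load_file() plus the
- # load_state_dict() copy in apply_prepared_lora_state_to_pipeline() below.
-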
- def apply_prepared_lora_state_to_pipeline():
-     """
-     Fast step: copy the already-prepared CPU state into the live transformer.
-     This is the only part that should remain near generation time.
-     """
-     global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE
-
-     if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
-         print("[LoRA] No prepared LoRA state available; skipping.")
-         return False
-
-     if current_lora_key == PENDING_LORA_KEY:
-         print("[LoRA] Prepared LoRA state already active; skipping.")
-         return True
-
-     existing_transformer = _transformer
-     with torch.no_grad():
-         missing, unexpected = existing_transformer.load_state_dict(PENDING_LORA_STATE, strict=False)
-     if missing or unexpected:
-         print(f"[LoRA] load_state_dict mismatch: missing={len(missing)}, unexpected={len(unexpected)}")
-
-     current_lora_key = PENDING_LORA_KEY
-     print("[LoRA] Prepared LoRA state applied to the pipeline.")
-     return True
-
- # ---- REPLACE PRELOAD BLOCK START ----
- # Preload all models for ZeroGPU tensor packing.
- print("Preloading all models (including Gemma and audio components)...")
- ledger = pipeline.model_ledger
-
- # Save the original factory methods so we can rebuild individual components later.
- # These are bound callables on ledger that will call the builder when invoked.
- _orig_transformer_factory = ledger.transformer
- _orig_video_encoder_factory = ledger.video_encoder
- _orig_video_decoder_factory = ledger.video_decoder
- _orig_audio_encoder_factory = ledger.audio_encoder
- _orig_audio_decoder_factory = ledger.audio_decoder
- _orig_vocoder_factory = ledger.vocoder
- _orig_spatial_upsampler_factory = ledger.spatial_upsampler
- _orig_text_encoder_factory = ledger.text_encoder
- _orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor
-
- # Call the original factories once to create the cached instances we will serve by default.
- _transformer = _orig_transformer_factory()
- _video_encoder = _orig_video_encoder_factory()
- _video_decoder = _orig_video_decoder_factory()
- _audio_encoder = _orig_audio_encoder_factory()
- _audio_decoder = _orig_audio_decoder_factory()
- _vocoder = _orig_vocoder_factory()
- _spatial_upsampler = _orig_spatial_upsampler_factory()
- _text_encoder = _orig_text_encoder_factory()
- _embeddings_processor = _orig_gemma_embeddings_factory()
-
- # Replace ledger methods with lightweight lambdas that return the cached instances.
- # We keep the original factories above so we can call them later to rebuild components.
- ledger.transformer = lambda: _transformer
- ledger.video_encoder = lambda: _video_encoder
- ledger.video_decoder = lambda: _video_decoder
- ledger.audio_encoder = lambda: _audio_encoder
- ledger.audio_decoder = lambda: _audio_decoder
- ledger.vocoder = lambda: _vocoder
- ledger.spatial_upsampler = lambda: _spatial_upsampler
- ledger.text_encoder = lambda: _text_encoder
- ledger.gemma_embeddings_processor = lambda: _embeddings_processor
-
- print("All models preloaded (including Gemma text encoder and audio encoder)!")
- # ---- REPLACE PRELOAD BLOCK END ----
-
- print("=" * 80)
- print("Pipeline ready!")
- print("=" * 80)
-
-
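- # Editorial note: the block above memoizes every ledger factory, e.g.
- #     _transformer = _orig_transformer_factory()   # built once at startup
- #     ledger.transformer = lambda: _transformer    # later calls reuse it
- # so repeated ledger.transformer() calls inside the pipeline are effectively
- # free, and the preloaded weights can be packed for ZeroGPU.
-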
- def log_memory(tag: str):
-     if torch.cuda.is_available():
-         allocated = torch.cuda.memory_allocated() / 1024**3
-         peak = torch.cuda.max_memory_allocated() / 1024**3
-         free, total = torch.cuda.mem_get_info()
-         print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
-
-
- def detect_aspect_ratio(image) -> str:
-     if image is None:
-         return "16:9"
-     if hasattr(image, "size"):
-         w, h = image.size
-     elif hasattr(image, "shape"):
-         h, w = image.shape[:2]
-     else:
-         return "16:9"
-     ratio = w / h
-     # Only the three most common ratios are snap candidates; RESOLUTIONS defines
-     # more presets, but uploads are always mapped to the nearest of these three.
-     candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
-     return min(candidates, key=lambda k: abs(ratio - candidates[k]))
-
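- # Worked example: a 1920x1080 PIL image gives ratio 1920 / 1080 = 1.78, which
- # is closest to 16 / 9, so with High Resolution checked on_image_upload() below
- # sets (width, height) to RESOLUTIONS["high"]["16:9"] == (1536, 1024).
-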
- def on_image_upload(first_image, last_image, high_res):
-     ref_image = first_image if first_image is not None else last_image
-     aspect = detect_aspect_ratio(ref_image)
-     tier = "high" if high_res else "low"
-     w, h = RESOLUTIONS[tier][aspect]
-     return gr.update(value=w), gr.update(value=h)
-
-
- # Identical logic to on_image_upload; kept as a separate name so the event
- # wiring below reads clearly.
- def on_highres_toggle(first_image, last_image, high_res):
-     ref_image = first_image if first_image is not None else last_image
-     aspect = detect_aspect_ratio(ref_image)
-     tier = "high" if high_res else "low"
-     w, h = RESOLUTIONS[tier][aspect]
-     return gr.update(value=w), gr.update(value=h)
-
- def get_gpu_duration(
-     first_image,
-     last_image,
-     input_audio,
-     prompt: str,
-     duration: float,
-     gpu_duration: float,
-     enhance_prompt: bool = True,
-     seed: int = 42,
-     randomize_seed: bool = True,
-     height: int = 1024,
-     width: int = 1536,
-     pose_strength: float = 0.0,
-     general_strength: float = 0.0,
-     motion_strength: float = 0.0,
-     dreamlay_strength: float = 0.0,
-     mself_strength: float = 0.0,
-     dramatic_strength: float = 0.0,
-     fluid_strength: float = 0.0,
-     liquid_strength: float = 0.0,
-     demopose_strength: float = 0.0,
-     voice_strength: float = 0.0,
-     realism_strength: float = 0.0,
-     transition_strength: float = 0.0,
-     progress=None,
- ):
-     return int(gpu_duration)
-
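- # Editorial note: spaces.GPU accepts a callable for `duration`; ZeroGPU invokes
- # it with the same arguments as the decorated function (hence the mirrored
- # signature above), so the user-facing gpu_duration slider directly sets the
- # GPU allocation window for generate_video() below.
-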
- @spaces.GPU(duration=get_gpu_duration)
- @torch.inference_mode()
- def generate_video(
-     first_image,
-     last_image,
-     input_audio,
-     prompt: str,
-     duration: float,
-     gpu_duration: float,
-     enhance_prompt: bool = True,
-     seed: int = 42,
-     randomize_seed: bool = True,
-     height: int = 1024,
-     width: int = 1536,
-     pose_strength: float = 0.0,
-     general_strength: float = 0.0,
-     motion_strength: float = 0.0,
-     dreamlay_strength: float = 0.0,
-     mself_strength: float = 0.0,
-     dramatic_strength: float = 0.0,
-     fluid_strength: float = 0.0,
-     liquid_strength: float = 0.0,
-     demopose_strength: float = 0.0,
-     voice_strength: float = 0.0,
-     realism_strength: float = 0.0,
-     transition_strength: float = 0.0,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     # Pick the seed before entering the try block so the except handler can
-     # always return it, even if an error occurs early.
-     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
-     try:
-         torch.cuda.reset_peak_memory_stats()
-         log_memory("start")
-
-         frame_rate = DEFAULT_FRAME_RATE
-         # Round the frame count up to the 8n+1 values the model expects,
-         # e.g. 10.0 s at 24 fps -> 241 frames (8 * 30 + 1).
-         num_frames = int(duration * frame_rate) + 1
-         num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
-
-         print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")
-
-         images = []
-         output_dir = Path("outputs")
-         output_dir.mkdir(exist_ok=True)
-
-         if first_image is not None:
-             temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
-             if hasattr(first_image, "save"):
-                 first_image.save(temp_first_path)
-             else:
-                 temp_first_path = Path(first_image)
-             images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))
-
-         if last_image is not None:
-             temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
-             if hasattr(last_image, "save"):
-                 last_image.save(temp_last_path)
-             else:
-                 temp_last_path = Path(last_image)
-             images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))
-
-         tiling_config = TilingConfig.default()
-         video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
-
-         log_memory("before pipeline call")
-
-         apply_prepared_lora_state_to_pipeline()
-
-         video, audio = pipeline(
-             prompt=prompt,
-             seed=current_seed,
-             height=int(height),
-             width=int(width),
-             num_frames=num_frames,
-             frame_rate=frame_rate,
-             images=images,
-             audio_path=input_audio,
-             tiling_config=tiling_config,
-             enhance_prompt=enhance_prompt,
-         )
-
-         log_memory("after pipeline call")
-
-         # tempfile.mktemp() is deprecated and racy; create the file safely instead.
-         with tempfile.NamedTemporaryFile(suffix=".mp4", delete=False) as f:
-             output_path = f.name
-         encode_video(
-             video=video,
-             fps=frame_rate,
-             audio=audio,
-             output_path=output_path,
-             video_chunks_number=video_chunks_number,
-         )
-
-         log_memory("after encode_video")
-         return str(output_path), current_seed
-
-     except Exception as e:
-         import traceback
-         log_memory("on error")
-         print(f"Error: {e}\n{traceback.format_exc()}")
-         return None, current_seed
-
-
- # In current Gradio, `theme` and `css` are gr.Blocks() constructor arguments
- # (Blocks.launch() does not accept them), so the CSS is defined up front.
- css = """
- .fillable{max-width: 1200px !important}
- """
-
- with gr.Blocks(title="LTX-2.3 Distilled", theme=gr.themes.Citrus(), css=css) as demo:
-     gr.Markdown("# LTX-2.3 F2LF: Fast Audio-Video Generation with Frame Conditioning")
-
-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 first_image = gr.Image(label="First Frame (Optional)", type="pil")
-                 last_image = gr.Image(label="Last Frame (Optional)", type="pil")
-             input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
-             prompt = gr.Textbox(
-                 label="Prompt",
-                 info="for best results - make it as elaborate as possible",
-                 value="Make this image come alive with cinematic motion, smooth animation",
-                 lines=3,
-                 placeholder="Describe the motion and animation you want...",
-             )
-             duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=30.0, value=10.0, step=0.1)
-
-             generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
-
-             with gr.Accordion("Advanced Settings", open=False):
-                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
-                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                 with gr.Row():
-                     width = gr.Number(label="Width", value=1536, precision=0)
-                     height = gr.Number(label="Height", value=1024, precision=0)
-                 with gr.Row():
-                     enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
-                     high_res = gr.Checkbox(label="High Resolution", value=True)
-         with gr.Column():
-             gr.Markdown("### LoRA adapter strengths (set to 0 to disable; slow and WIP)")
-             pose_strength = gr.Slider(
-                 label="Anthro Enhancer strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             general_strength = gr.Slider(
-                 label="Reasoning Enhancer strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             motion_strength = gr.Slider(
-                 label="Anthro Posing Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             dreamlay_strength = gr.Slider(
-                 label="Dreamlay strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             mself_strength = gr.Slider(
-                 label="Mself strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             dramatic_strength = gr.Slider(
-                 label="Dramatic strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             fluid_strength = gr.Slider(
-                 label="Fluid Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             liquid_strength = gr.Slider(
-                 label="Liquid Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             demopose_strength = gr.Slider(
-                 label="Audio Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             voice_strength = gr.Slider(
-                 label="Voice Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             realism_strength = gr.Slider(
-                 label="Anthro Realism strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             transition_strength = gr.Slider(
-                 label="Transition strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01
-             )
-             prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
-             lora_status = gr.Textbox(
-                 label="LoRA Cache Status",
-                 value="No LoRA state prepared yet.",
-                 interactive=False,
-             )
-
-         with gr.Column():
-             output_video = gr.Video(label="Generated Video", autoplay=False)
-             gpu_duration = gr.Slider(
-                 label="ZeroGPU duration (seconds; 10 second Img2Vid at 1024x1024 with LoRAs = ~70)",
-                 minimum=30.0,
-                 maximum=240.0,
-                 value=75.0,
-                 step=1.0,
-             )
-
-     gr.Examples(
-         examples=[
-             [
-                 None,
-                 "pinkknit.jpg",
-                 None,
-                 "The camera falls downward through darkness as if dropped into a tunnel. "
-                 "As it slows, five friends wearing pink knitted hats and sunglasses lean "
-                 "over and look down toward the camera with curious expressions. The lens "
-                 "has a strong fisheye effect, creating a circular frame around them. They "
-                 "crowd together closely, forming a symmetrical cluster while staring "
-                 "directly into the lens.",
-                 3.0,
-                 80.0,
-                 False,
-                 42,
-                 True,
-                 1024,
-                 1024,
-                 0.0,  # pose_strength (example)
-                 0.0,  # general_strength (example)
-                 0.0,  # motion_strength (example)
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-             ],
-         ],
-         inputs=[
-             first_image, last_image, input_audio, prompt, duration, gpu_duration,
-             enhance_prompt, seed, randomize_seed, height, width,
-             pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength,
-         ],
-     )
-
-     first_image.change(
-         fn=on_image_upload,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     last_image.change(
-         fn=on_image_upload,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     high_res.change(
-         fn=on_highres_toggle,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     prepare_lora_btn.click(
-         fn=prepare_lora_cache,
-         inputs=[pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength],
-         outputs=[lora_status],
-     )
-
-     generate_btn.click(
-         fn=generate_video,
-         inputs=[
-             first_image, last_image, input_audio, prompt, duration, gpu_duration, enhance_prompt,
-             seed, randomize_seed, height, width,
-             pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength,
-         ],
-         outputs=[output_video, seed],
-     )
-
-
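- # Editorial note: generate_video() returns (output_path, current_seed), and the
- # click wiring above routes the second value back into the seed slider, so a
- # randomized run can be reproduced by unchecking Randomize Seed.
-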
- if __name__ == "__main__":
-     demo.launch()