dagloop5 committed on
Commit 2622486 · verified · 1 Parent(s): 30947a4

Delete app(WIP).py

Files changed (1)
  1. app(WIP).py +0 -900
app(WIP).py DELETED
@@ -1,900 +0,0 @@
- import os
- import subprocess
- import sys
-
- # Disable torch.compile / dynamo before any torch import
- os.environ["TORCH_COMPILE_DISABLE"] = "1"
- os.environ["TORCHDYNAMO_DISABLE"] = "1"
-
- # Install xformers for memory-efficient attention
- subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
-
- # Clone LTX-2 repo and install packages
- LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
- LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")
-
- LTX_COMMIT = "ae855f8538843825f9015a419cf4ba5edaf5eec2"  # known working commit with decode_video
-
- if not os.path.exists(LTX_REPO_DIR):
-     print(f"Cloning {LTX_REPO_URL}...")
-     subprocess.run(["git", "clone", LTX_REPO_URL, LTX_REPO_DIR], check=True)
-     subprocess.run(["git", "checkout", LTX_COMMIT], cwd=LTX_REPO_DIR, check=True)
-
- print("Installing ltx-core and ltx-pipelines from cloned repo...")
- subprocess.run(
-     [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
-      os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
-      "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
-     check=True,
- )
-
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
-
- import logging
- import random
- import tempfile
- from pathlib import Path
- import gc
- import hashlib
-
- import torch
- torch._dynamo.config.suppress_errors = True
- torch._dynamo.config.disable = True
-
- import spaces
- import gradio as gr
- import numpy as np
- from huggingface_hub import hf_hub_download, snapshot_download
- from safetensors.torch import load_file, save_file
- from safetensors import safe_open
- import json
- import requests
-
- from ltx_core.components.diffusion_steps import EulerDiffusionStep
- from ltx_core.components.noisers import GaussianNoiser
- from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
- from ltx_core.model.upsampler import upsample_video
- from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number, decode_video as vae_decode_video
- from ltx_core.quantization import QuantizationPolicy
- from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
- from ltx_pipelines.distilled import DistilledPipeline
- from ltx_pipelines.utils import euler_denoising_loop
- from ltx_pipelines.utils.args import ImageConditioningInput
- from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
- from ltx_pipelines.utils.helpers import (
-     cleanup_memory,
-     combined_image_conditionings,
-     denoise_audio_video,
-     denoise_video_only,
-     encode_prompts,
-     simple_denoising_func,
- )
- from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
- from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
- from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP
-
- # Force-patch xformers attention into the LTX attention module.
- from ltx_core.model.transformer import attention as _attn_mod
- print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
- try:
-     from xformers.ops import memory_efficient_attention as _mea
-     _attn_mod.memory_efficient_attention = _mea
-     print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
- except Exception as e:
-     print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")
-
- logging.getLogger().setLevel(logging.INFO)
-
- MAX_SEED = np.iinfo(np.int32).max
- DEFAULT_PROMPT = (
-     "An astronaut hatches from a fragile egg on the surface of the Moon, "
-     "the shell cracking and peeling apart in gentle low-gravity motion. "
-     "Fine lunar dust lifts and drifts outward with each movement, floating "
-     "in slow arcs before settling back onto the ground."
- )
- DEFAULT_FRAME_RATE = 24.0
-
- # Resolution presets: (width, height)
- RESOLUTIONS = {
-     "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
-     "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
- }
-
-
- class LTX23DistilledA2VPipeline(DistilledPipeline):
-     """DistilledPipeline: single stage, full resolution, 8 steps, with optional audio."""
-
-     def __call__(
-         self,
-         prompt: str,
-         seed: int,
-         height: int,
-         width: int,
-         num_frames: int,
-         frame_rate: float,
-         images: list[ImageConditioningInput],
-         audio_path: str | None = None,
-         tiling_config: TilingConfig | None = None,
-         enhance_prompt: bool = False,
-     ):
-         print(prompt)
-
-         generator = torch.Generator(device=self.device).manual_seed(seed)
-         noiser = GaussianNoiser(generator=generator)
-         stepper = EulerDiffusionStep()
-         dtype = torch.bfloat16
-
-         (ctx_p,) = encode_prompts(
-             [prompt],
-             self.model_ledger,
-             enhance_first_prompt=enhance_prompt,
-             enhance_prompt_image=images[0].path if len(images) > 0 else None,
-         )
-         video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding
-
-         # Audio encoding — only runs if audio is provided
-         encoded_audio_latent = None
-         original_audio = None
-         if audio_path is not None:
-             video_duration = num_frames / frame_rate
-             decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
-             if decoded_audio is None:
-                 raise ValueError(f"Could not extract audio stream from {audio_path}")
-
-             encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
-             audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
-             expected_frames = audio_shape.frames
-             actual_frames = encoded_audio_latent.shape[2]
-
-             if actual_frames > expected_frames:
-                 encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
-             elif actual_frames < expected_frames:
-                 pad = torch.zeros(
-                     encoded_audio_latent.shape[0],
-                     encoded_audio_latent.shape[1],
-                     expected_frames - actual_frames,
-                     encoded_audio_latent.shape[3],
-                     device=encoded_audio_latent.device,
-                     dtype=encoded_audio_latent.dtype,
-                 )
-                 encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)
-
-             original_audio = Audio(
-                 waveform=decoded_audio.waveform.squeeze(0),
-                 sampling_rate=decoded_audio.sampling_rate,
-             )
-
-         video_encoder = self.model_ledger.video_encoder()
-         transformer = self.model_ledger.transformer()
-         sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)
-
-         def denoising_loop(sigmas, video_state, audio_state, stepper):
-             return euler_denoising_loop(
-                 sigmas=sigmas,
-                 video_state=video_state,
-                 audio_state=audio_state,
-                 stepper=stepper,
-                 denoise_fn=simple_denoising_func(
-                     video_context=video_context,
-                     audio_context=audio_context,
-                     transformer=transformer,
-                 ),
-             )
-
-         output_shape = VideoPixelShape(
-             batch=1,
-             frames=num_frames,
-             width=width,
-             height=height,
-             fps=frame_rate,
-         )
-         conditionings = combined_image_conditionings(
-             images=images,
-             height=output_shape.height,
-             width=output_shape.width,
-             video_encoder=video_encoder,
-             dtype=dtype,
-             device=self.device,
-         )
-         video_state, audio_state = denoise_audio_video(
-             output_shape=output_shape,
-             conditionings=conditionings,
-             noiser=noiser,
-             sigmas=sigmas,
-             stepper=stepper,
-             denoising_loop_fn=denoising_loop,
-             components=self.pipeline_components,
-             dtype=dtype,
-             device=self.device,
-             initial_audio_latent=encoded_audio_latent,
-         )
-
-         torch.cuda.synchronize()
-         del transformer
-         del video_encoder
-         cleanup_memory()
-
-         decoded_video = vae_decode_video(
-             video_state.latent,
-             self.model_ledger.video_decoder(),
-             tiling_config,
-             generator,
-         )
-
-         # If audio was provided as input, return it as-is (higher fidelity than decoded)
-         # If no audio input, decode the generated audio latent from the denoising
-         if original_audio is not None:
-             return decoded_video, original_audio
-         else:
-             from ltx_core.model.audio_vae import decode_audio as vae_decode_audio
-             generated_audio = vae_decode_audio(
-                 audio_state.latent,
-                 self.model_ledger.audio_decoder(),
-                 self.model_ledger.vocoder(),
-             )
-             return decoded_video, generated_audio
-
-
- # Model repos
- LTX_MODEL_REPO = "Lightricks/LTX-2.3"
- GEMMA_REPO = "Lightricks/gemma-3-12b-it-qat-q4_0-unquantized"
- GEMMA_ABLITERATED_REPO = "Sikaworld1990/gemma-3-12b-it-abliterated-sikaworld-high-fidelity-edition-Ltx-2"
- GEMMA_ABLITERATED_FILE = "gemma-3-12b-it-abliterated-sikaworld-high-fidelity-edition.safetensors"
-
- # Download model checkpoints
- print("=" * 80)
- print("Downloading LTX-2.3 distilled model + Gemma...")
- print("=" * 80)
-
- # LoRA cache directory and currently-applied key
- LORA_CACHE_DIR = Path("lora_cache")
- LORA_CACHE_DIR.mkdir(exist_ok=True)
- current_lora_key: str | None = None
-
- weights_dir = Path("weights")
- weights_dir.mkdir(exist_ok=True)
- checkpoint_path = hf_hub_download(
-     repo_id=LTX_MODEL_REPO,
-     filename="ltx-2.3-22b-distilled-1.1.safetensors",
-     local_dir=str(weights_dir),
-     local_dir_use_symlinks=False,
- )
- spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
-
- print("[Gemma] Setting up abliterated Gemma text encoder...")
- MERGED_WEIGHTS = "/tmp/abliterated_gemma_merged.safetensors"
- gemma_root = "/tmp/abliterated_gemma"
- os.makedirs(gemma_root, exist_ok=True)
-
- gemma_official_dir = snapshot_download(
-     repo_id=GEMMA_REPO,
-     ignore_patterns=["*.safetensors", "*.safetensors.index.json"],
- )
-
- for fname in os.listdir(gemma_official_dir):
-     src = os.path.join(gemma_official_dir, fname)
-     dst = os.path.join(gemma_root, fname)
-     if os.path.isfile(src) and not fname.endswith(".safetensors") and fname != "model.safetensors.index.json":
-         if not os.path.exists(dst):
-             os.symlink(src, dst)
-
- if os.path.exists(MERGED_WEIGHTS):
-     print("[Gemma] Using cached merged weights")
- else:
-     abliterated_weights_path = hf_hub_download(
-         repo_id=GEMMA_ABLITERATED_REPO,
-         filename=GEMMA_ABLITERATED_FILE,
-     )
-     index_path = hf_hub_download(
-         repo_id=GEMMA_REPO,
-         filename="model.safetensors.index.json"
-     )
-     with open(index_path) as f:
-         weight_index = json.load(f)
-
-     vision_keys = {}
-     for key, shard in weight_index["weight_map"].items():
-         if "vision_tower" in key or "multi_modal_projector" in key:
-             vision_keys[key] = shard
-     needed_shards = set(vision_keys.values())
-
-     shard_paths = {}
-     for shard_name in needed_shards:
-         shard_paths[shard_name] = hf_hub_download(
-             repo_id=GEMMA_REPO,
-             filename=shard_name
-         )
-
-     _fp8_types = {torch.float8_e4m3fn, torch.float8_e5m2}
-     raw = load_file(abliterated_weights_path)
-     merged = {}
-     for key, tensor in raw.items():
-         t = tensor.to(torch.bfloat16) if tensor.dtype in _fp8_types else tensor
-         merged[f"language_model.{key}"] = t
-     del raw
-
-     for key, shard_name in vision_keys.items():
-         with safe_open(shard_paths[shard_name], framework="pt") as f:
-             merged[key] = f.get_tensor(key)
-
-     save_file(merged, MERGED_WEIGHTS)
-     del merged
-     gc.collect()
-
- weight_link = os.path.join(gemma_root, "model.safetensors")
- if os.path.exists(weight_link):
-     os.remove(weight_link)
- os.symlink(MERGED_WEIGHTS, weight_link)
- print(f"[Gemma] Root ready: {gemma_root}")
-
- # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
- # LoRA repo + download the requested LoRA adapters
- LORA_REPO = "dagloop5/LoRA"
-
- print("=" * 80)
- print("Downloading LoRA adapters from dagloop5/LoRA...")
- print("=" * 80)
- pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2_3_NSFW_furry_concat_v2.safetensors")
- general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX2.3_reasoning_I2V_V3.safetensors")
- motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
- dreamlay_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="DR34ML4Y_LTXXX_PREVIEW_RC1.safetensors")  # m15510n4ry, bl0wj0b, d0ubl3_bj, d0gg1e, c0wg1rl
- mself_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="Furry Hyper Masturbation - LTX-2 I2V v1.safetensors")  # Hyperfap
- dramatic_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="LTX-2.3 - Orgasm.safetensors")  # "[He | She] is having am orgasm." (am or an?)
- fluid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="cr3ampi3_animation_i2v_ltx2_v1.0.safetensors")  # cr3ampi3 animation., missionary animation, doggystyle bouncy animation, double penetration animation
- liquid_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="liquid_wet_dr1pp_ltx2_v1.0_scaled.safetensors")  # wet dr1pp
- demopose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="clapping-cheeks-audio-v001-alpha.safetensors")
- voice_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="hentai_voice_ltx23.safetensors")
- realism_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="FurryenhancerLTX2.3V1.215.safetensors")
- transition_lora_path = hf_hub_download(repo_id="valiantcat/LTX-2.3-Transition-LORA", filename="ltx2.3-transition.safetensors")
-
- print(f"Pose LoRA: {pose_lora_path}")
- print(f"General LoRA: {general_lora_path}")
- print(f"Motion LoRA: {motion_lora_path}")
- print(f"Dreamlay LoRA: {dreamlay_lora_path}")
- print(f"Mself LoRA: {mself_lora_path}")
- print(f"Dramatic LoRA: {dramatic_lora_path}")
- print(f"Fluid LoRA: {fluid_lora_path}")
- print(f"Liquid LoRA: {liquid_lora_path}")
- print(f"Demopose LoRA: {demopose_lora_path}")
- print(f"Voice LoRA: {voice_lora_path}")
- print(f"Realism LoRA: {realism_lora_path}")
- print(f"Transition LoRA: {transition_lora_path}")
- # ----------------------------------------------------------------
-
- print(f"Spatial upsampler: {spatial_upsampler_path}")
- print(f"Checkpoint: {checkpoint_path}")
-
- # Initialize pipeline WITH text encoder and optional audio support
- # ---- Replace block (pipeline init) lines 275-281 ----
- pipeline = LTX23DistilledA2VPipeline(
-     distilled_checkpoint_path=checkpoint_path,
-     spatial_upsampler_path=spatial_upsampler_path,
-     gemma_root=gemma_root,
-     loras=[],
-     quantization=QuantizationPolicy.fp8_cast(),  # keep FP8 quantization unchanged
- )
- # ----------------------------------------------------------------
-
- # Currently applied LoRA deltas — stored so they can be undone before re-applying
- _applied_lora_deltas: dict[str, torch.Tensor] = {}
- _applied_lora_config: list[tuple[str, float]] = []
-
-
- def _load_and_rename_lora_tensors(lora_path: str) -> dict[str, torch.Tensor]:
-     """Load LoRA tensors from disk and apply ComfyUI→LTX key renaming."""
-     tensors = {}
-     with safe_open(lora_path, framework="pt", device="cpu") as f:
-         for key in f.keys():
-             tensors[key] = f.get_tensor(key)
-
-     renamed = {}
-     for key, tensor in tensors.items():
-         new_key = key
-         for old_substr, new_substr in LTXV_LORA_COMFY_RENAMING_MAP.items():
-             new_key = new_key.replace(old_substr, new_substr)
-         renamed[new_key] = tensor
-
-     return renamed
-
-
- def _compute_lora_deltas(lora_path: str, strength: float) -> dict[str, torch.Tensor]:
-     """Compute weight delta tensors for a single LoRA at given strength."""
-     tensors = _load_and_rename_lora_tensors(lora_path)
-     deltas = {}
-
-     # Collect all base keys that have a down component
-     base_keys = set()
-     for key in tensors:
-         for suffix in [".lora_down.weight", ".lora_A.weight"]:
-             if key.endswith(suffix):
-                 base_keys.add(key[: -len(suffix)])
-
-     for base in base_keys:
-         # Look up the down/up pair explicitly; chaining `or` on tensors would call
-         # bool() on a multi-element tensor and raise.
-         down = tensors.get(base + ".lora_down.weight")
-         if down is None:
-             down = tensors.get(base + ".lora_A.weight")
-         up = tensors.get(base + ".lora_up.weight")
-         if up is None:
-             up = tensors.get(base + ".lora_B.weight")
-
-         if down is None or up is None:
-             continue
-
-         alpha_val = tensors.get(base + ".alpha")
-         scale = (alpha_val.item() / down.shape[0]) if alpha_val is not None else 1.0
-
-         down_f = down.float()
-         up_f = up.float()
-
-         if down_f.dim() == 2 and up_f.dim() == 2:
-             delta = up_f @ down_f
-         elif down_f.dim() == 4:
-             delta = (up_f.flatten(1) @ down_f.flatten(1)).view(
-                 up_f.shape[0], down_f.shape[1], *up_f.shape[2:]
-             )
-         else:
-             print(f"[LoRA] Skipping {base}: unexpected dims down={down_f.dim()} up={up_f.dim()}")
-             continue
-
-         deltas[base + ".weight"] = (delta * strength * scale).to(torch.bfloat16)
-
-     return deltas
-
-
- def apply_loras_to_transformer(
-     pose_strength, general_strength, motion_strength, dreamlay_strength,
-     mself_strength, dramatic_strength, fluid_strength, liquid_strength,
-     demopose_strength, voice_strength, realism_strength, transition_strength,
- ):
-     global _applied_lora_deltas, _applied_lora_config
-
-     lora_configs = [
-         (pose_lora_path, round(float(pose_strength), 2)),
-         (general_lora_path, round(float(general_strength), 2)),
-         (motion_lora_path, round(float(motion_strength), 2)),
-         (dreamlay_lora_path, round(float(dreamlay_strength), 2)),
-         (mself_lora_path, round(float(mself_strength), 2)),
-         (dramatic_lora_path, round(float(dramatic_strength), 2)),
-         (fluid_lora_path, round(float(fluid_strength), 2)),
-         (liquid_lora_path, round(float(liquid_strength), 2)),
-         (demopose_lora_path, round(float(demopose_strength), 2)),
-         (voice_lora_path, round(float(voice_strength), 2)),
-         (realism_lora_path, round(float(realism_strength), 2)),
-         (transition_lora_path, round(float(transition_strength), 2)),
-     ]
-
-     # Skip if config hasn't changed since last application
-     if lora_configs == _applied_lora_config:
-         print("[LoRA] Config unchanged, skipping re-application.")
-         return
-
-     # Undo previously applied deltas
-     if _applied_lora_deltas:
-         print(f"[LoRA] Undoing {len(_applied_lora_deltas)} previously applied delta(s)...")
-         with torch.no_grad():
-             for name, param in _transformer.named_parameters():
-                 if name in _applied_lora_deltas:
-                     param.data -= _applied_lora_deltas[name].to(
-                         device=param.device, dtype=param.dtype
-                     )
-         _applied_lora_deltas = {}
-         gc.collect()
-
-     active = [(p, s) for p, s in lora_configs if p is not None and s != 0.0]
-     if not active:
-         print("[LoRA] No active LoRAs.")
-         _applied_lora_config = lora_configs
-         return
-
-     print(f"[LoRA] Computing deltas for {len(active)} active LoRA(s)...")
-     combined_deltas: dict[str, torch.Tensor] = {}
-     for lora_path, strength in active:
-         try:
-             deltas = _compute_lora_deltas(lora_path, strength)
-             for key, delta in deltas.items():
-                 if key in combined_deltas:
-                     combined_deltas[key] = combined_deltas[key] + delta
-                 else:
-                     combined_deltas[key] = delta
-             print(f"[LoRA] {Path(lora_path).name}: {len(deltas)} delta(s) at strength {strength}")
-         except Exception as e:
-             import traceback
-             print(f"[LoRA] Failed on {lora_path}: {e}\n{traceback.format_exc()}")
-
-     applied_count = 0
-     with torch.no_grad():
-         for name, param in _transformer.named_parameters():
-             if name in combined_deltas:
-                 param.data += combined_deltas[name].to(
-                     device=param.device, dtype=param.dtype
-                 )
-                 applied_count += 1
-
-     _applied_lora_deltas = combined_deltas
-     _applied_lora_config = lora_configs
-     print(f"[LoRA] Applied {applied_count} weight delta(s) to live transformer.")
-     gc.collect()
-
- # ---- REPLACE PRELOAD BLOCK START ----
- # Preload all models for ZeroGPU tensor packing.
- print("Preloading all models (including Gemma and audio components)...")
- ledger = pipeline.model_ledger
-
- # Save the original factory methods so we can rebuild individual components later.
- # These are bound callables on ledger that will call the builder when invoked.
- _orig_transformer_factory = ledger.transformer
- _orig_video_encoder_factory = ledger.video_encoder
- _orig_video_decoder_factory = ledger.video_decoder
- _orig_audio_encoder_factory = ledger.audio_encoder
- _orig_audio_decoder_factory = ledger.audio_decoder
- _orig_vocoder_factory = ledger.vocoder
- _orig_spatial_upsampler_factory = ledger.spatial_upsampler
- _orig_text_encoder_factory = ledger.text_encoder
- _orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor
-
- # Call the original factories once to create the cached instances we will serve by default.
- _transformer = _orig_transformer_factory()
- _video_encoder = _orig_video_encoder_factory()
- _video_decoder = _orig_video_decoder_factory()
- _audio_encoder = _orig_audio_encoder_factory()
- _audio_decoder = _orig_audio_decoder_factory()
- _vocoder = _orig_vocoder_factory()
- _spatial_upsampler = _orig_spatial_upsampler_factory()
- _text_encoder = _orig_text_encoder_factory()
- _embeddings_processor = _orig_gemma_embeddings_factory()
-
- # Replace ledger methods with lightweight lambdas that return the cached instances.
- # We keep the original factories above so we can call them later to rebuild components.
- ledger.transformer = lambda: _transformer
- ledger.video_encoder = lambda: _video_encoder
- ledger.video_decoder = lambda: _video_decoder
- ledger.audio_encoder = lambda: _audio_encoder
- ledger.audio_decoder = lambda: _audio_decoder
- ledger.vocoder = lambda: _vocoder
- ledger.spatial_upsampler = lambda: _spatial_upsampler
- ledger.text_encoder = lambda: _text_encoder
- ledger.gemma_embeddings_processor = lambda: _embeddings_processor
-
- print("All models preloaded (including Gemma text encoder and audio encoder)!")
- # ---- REPLACE PRELOAD BLOCK END ----
-
- print("=" * 80)
- print("Pipeline ready!")
- print("=" * 80)
-
-
- def log_memory(tag: str):
-     if torch.cuda.is_available():
-         allocated = torch.cuda.memory_allocated() / 1024**3
-         peak = torch.cuda.max_memory_allocated() / 1024**3
-         free, total = torch.cuda.mem_get_info()
-         print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
-
-
- def detect_aspect_ratio(image) -> str:
-     if image is None:
-         return "16:9"
-     if hasattr(image, "size"):
-         w, h = image.size
-     elif hasattr(image, "shape"):
-         h, w = image.shape[:2]
-     else:
-         return "16:9"
-     ratio = w / h
-     candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
-     return min(candidates, key=lambda k: abs(ratio - candidates[k]))
-
-
- def on_image_upload(first_image, last_image, high_res):
-     ref_image = first_image if first_image is not None else last_image
-     aspect = detect_aspect_ratio(ref_image)
-     tier = "high" if high_res else "low"
-     w, h = RESOLUTIONS[tier][aspect]
-     return gr.update(value=w), gr.update(value=h)
-
-
- def on_highres_toggle(first_image, last_image, high_res):
-     ref_image = first_image if first_image is not None else last_image
-     aspect = detect_aspect_ratio(ref_image)
-     tier = "high" if high_res else "low"
-     w, h = RESOLUTIONS[tier][aspect]
-     return gr.update(value=w), gr.update(value=h)
-
-
- def get_gpu_duration(
-     first_image,
-     last_image,
-     input_audio,
-     prompt: str,
-     duration: float,
-     gpu_duration: float,
-     enhance_prompt: bool = True,
-     seed: int = 42,
-     randomize_seed: bool = True,
-     height: int = 1024,
-     width: int = 1536,
-     pose_strength: float = 0.0,
-     general_strength: float = 0.0,
-     motion_strength: float = 0.0,
-     dreamlay_strength: float = 0.0,
-     mself_strength: float = 0.0,
-     dramatic_strength: float = 0.0,
-     fluid_strength: float = 0.0,
-     liquid_strength: float = 0.0,
-     demopose_strength: float = 0.0,
-     voice_strength: float = 0.0,
-     realism_strength: float = 0.0,
-     transition_strength: float = 0.0,
-     progress=None,
- ):
-     return int(gpu_duration)
-
- @spaces.GPU(duration=get_gpu_duration)
- @torch.inference_mode()
- def generate_video(
-     first_image,
-     last_image,
-     input_audio,
-     prompt: str,
-     duration: float,
-     gpu_duration: float,
-     enhance_prompt: bool = True,
-     seed: int = 42,
-     randomize_seed: bool = True,
-     height: int = 1024,
-     width: int = 1536,
-     pose_strength: float = 0.0,
-     general_strength: float = 0.0,
-     motion_strength: float = 0.0,
-     dreamlay_strength: float = 0.0,
-     mself_strength: float = 0.0,
-     dramatic_strength: float = 0.0,
-     fluid_strength: float = 0.0,
-     liquid_strength: float = 0.0,
-     demopose_strength: float = 0.0,
-     voice_strength: float = 0.0,
-     realism_strength: float = 0.0,
-     transition_strength: float = 0.0,
-     progress=gr.Progress(track_tqdm=True),
- ):
-     # Pick the seed before entering the try block so the except handler can always report it.
-     current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
-     try:
-         torch.cuda.reset_peak_memory_stats()
-         log_memory("start")
-
-         frame_rate = DEFAULT_FRAME_RATE
-         num_frames = int(duration * frame_rate) + 1
-         num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
-
-         print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")
-
-         images = []
-         output_dir = Path("outputs")
-         output_dir.mkdir(exist_ok=True)
-
-         if first_image is not None:
-             temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
-             if hasattr(first_image, "save"):
-                 first_image.save(temp_first_path)
-             else:
-                 temp_first_path = Path(first_image)
-             images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))
-
-         if last_image is not None:
-             temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
-             if hasattr(last_image, "save"):
-                 last_image.save(temp_last_path)
-             else:
-                 temp_last_path = Path(last_image)
-             images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))
-
-         tiling_config = TilingConfig.default()
-         video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
-
-         log_memory("before pipeline call")
-
-         apply_loras_to_transformer(
-             pose_strength, general_strength, motion_strength, dreamlay_strength,
-             mself_strength, dramatic_strength, fluid_strength, liquid_strength,
-             demopose_strength, voice_strength, realism_strength, transition_strength,
-         )
-
-         video, audio = pipeline(
-             prompt=prompt,
-             seed=current_seed,
-             height=int(height),
-             width=int(width),
-             num_frames=num_frames,
-             frame_rate=frame_rate,
-             images=images,
-             audio_path=input_audio,
-             tiling_config=tiling_config,
-             enhance_prompt=enhance_prompt,
-         )
-
-         log_memory("after pipeline call")
-
-         output_path = tempfile.mktemp(suffix=".mp4")
-         encode_video(
-             video=video,
-             fps=frame_rate,
-             audio=audio,
-             output_path=output_path,
-             video_chunks_number=video_chunks_number,
-         )
-
-         log_memory("after encode_video")
-         return str(output_path), current_seed
-
-     except Exception as e:
-         import traceback
-         log_memory("on error")
-         print(f"Error: {str(e)}\n{traceback.format_exc()}")
-         return None, current_seed
-
-
- # Theme and CSS are gr.Blocks() arguments; Blocks.launch() does not accept them.
- css = """
- .fillable{max-width: 1200px !important}
- """
-
- with gr.Blocks(title="LTX-2.3 Distilled", theme=gr.themes.Citrus(), css=css) as demo:
-     gr.Markdown("# LTX-2.3 F2LF: Fast Audio-Video Generation with Frame Conditioning")
-
-     with gr.Row():
-         with gr.Column():
-             with gr.Row():
-                 first_image = gr.Image(label="First Frame (Optional)", type="pil")
-                 last_image = gr.Image(label="Last Frame (Optional)", type="pil")
-             input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
-             prompt = gr.Textbox(
-                 label="Prompt",
-                 info="For best results, make the prompt as elaborate as possible",
-                 value="Make this image come alive with cinematic motion, smooth animation",
-                 lines=3,
-                 placeholder="Describe the motion and animation you want...",
-             )
-             duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=30.0, value=10.0, step=0.1)
-
-             generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
-
-             with gr.Accordion("Advanced Settings", open=False):
-                 seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
-                 randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
-                 with gr.Row():
-                     width = gr.Number(label="Width", value=1536, precision=0)
-                     height = gr.Number(label="Height", value=1024, precision=0)
-                 with gr.Row():
-                     enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
-                     high_res = gr.Checkbox(label="High Resolution", value=True)
-         with gr.Column():
-             gr.Markdown("### LoRA adapter strengths (set to 0 to disable; slow and WIP)")
-             pose_strength = gr.Slider(
-                 label="Anthro Enhancer strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             general_strength = gr.Slider(
-                 label="Reasoning Enhancer strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             motion_strength = gr.Slider(
-                 label="Anthro Posing Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             dreamlay_strength = gr.Slider(
-                 label="Dreamlay strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             mself_strength = gr.Slider(
-                 label="Mself strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             dramatic_strength = gr.Slider(
-                 label="Dramatic strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             fluid_strength = gr.Slider(
-                 label="Fluid Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             liquid_strength = gr.Slider(
-                 label="Liquid Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             demopose_strength = gr.Slider(
-                 label="Audio Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             voice_strength = gr.Slider(
-                 label="Voice Helper strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             realism_strength = gr.Slider(
-                 label="Anthro Realism strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-             transition_strength = gr.Slider(
-                 label="Transition strength",
-                 minimum=0.0, maximum=2.0, value=0.0, step=0.01,
-             )
-
-         with gr.Column():
-             output_video = gr.Video(label="Generated Video", autoplay=False)
-             gpu_duration = gr.Slider(
-                 label="ZeroGPU duration (seconds; 10 second Img2Vid with 1024x1024 and LoRAs = ~70)",
-                 minimum=30.0,
-                 maximum=240.0,
-                 value=75.0,
-                 step=1.0,
-             )
-
-     gr.Examples(
-         examples=[
-             [
-                 None,
-                 "pinkknit.jpg",
-                 None,
-                 "The camera falls downward through darkness as if dropped into a tunnel. "
-                 "As it slows, five friends wearing pink knitted hats and sunglasses lean "
-                 "over and look down toward the camera with curious expressions. The lens "
-                 "has a strong fisheye effect, creating a circular frame around them. They "
-                 "crowd together closely, forming a symmetrical cluster while staring "
-                 "directly into the lens.",
-                 3.0,
-                 80.0,
-                 False,
-                 42,
-                 True,
-                 1024,
-                 1024,
-                 0.0,  # pose_strength (example)
-                 0.0,  # general_strength (example)
-                 0.0,  # motion_strength (example)
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-                 0.0,
-             ],
-         ],
-         inputs=[
-             first_image, last_image, input_audio, prompt, duration, gpu_duration,
-             enhance_prompt, seed, randomize_seed, height, width,
-             pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength,
-         ],
-     )
-
-     first_image.change(
-         fn=on_image_upload,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     last_image.change(
-         fn=on_image_upload,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     high_res.change(
-         fn=on_highres_toggle,
-         inputs=[first_image, last_image, high_res],
-         outputs=[width, height],
-     )
-
-     generate_btn.click(
-         fn=generate_video,
-         inputs=[
-             first_image, last_image, input_audio, prompt, duration, gpu_duration, enhance_prompt,
-             seed, randomize_seed, height, width,
-             pose_strength, general_strength, motion_strength, dreamlay_strength, mself_strength, dramatic_strength, fluid_strength, liquid_strength, demopose_strength, voice_strength, realism_strength, transition_strength,
-         ],
-         outputs=[output_video, seed],
-     )
-
-
- if __name__ == "__main__":
-     demo.launch()