dagloop5 committed
Commit 56188de · verified · 1 Parent(s): 8fa2949

Delete app.py

Files changed (1)
  1. app.py +0 -788
app.py DELETED
@@ -1,788 +0,0 @@
1
- import os
2
- import subprocess
3
- import sys
4
-
5
- # Disable torch.compile / dynamo before any torch import
6
- os.environ["TORCH_COMPILE_DISABLE"] = "1"
7
- os.environ["TORCHDYNAMO_DISABLE"] = "1"
8
-
9
- # Install xformers for memory-efficient attention
10
- subprocess.run([sys.executable, "-m", "pip", "install", "xformers==0.0.32.post2", "--no-build-isolation"], check=False)
11
-
12
- # Clone LTX-2 repo and install packages
13
- LTX_REPO_URL = "https://github.com/Lightricks/LTX-2.git"
14
- LTX_REPO_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), "LTX-2")
15
-
16
- if not os.path.exists(LTX_REPO_DIR):
17
- print(f"Cloning {LTX_REPO_URL}...")
18
- subprocess.run(["git", "clone", "--depth", "1", LTX_REPO_URL, LTX_REPO_DIR], check=True)
19
-
20
- print("Installing ltx-core and ltx-pipelines from cloned repo...")
21
- subprocess.run(
22
- [sys.executable, "-m", "pip", "install", "--force-reinstall", "--no-deps", "-e",
23
- os.path.join(LTX_REPO_DIR, "packages", "ltx-core"),
24
- "-e", os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines")],
25
- check=True,
26
- )
27
-
28
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-pipelines", "src"))
29
- sys.path.insert(0, os.path.join(LTX_REPO_DIR, "packages", "ltx-core", "src"))
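- # Also put the package source dirs on sys.path so the freshly installed packages are importable in this same process.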
30
-
31
- import logging
32
- import random
33
- import tempfile
34
- from pathlib import Path
35
- import gc
36
- import hashlib
37
-
38
- import torch
39
- torch._dynamo.config.suppress_errors = True
40
- torch._dynamo.config.disable = True
41
-
42
- import spaces
43
- import gradio as gr
44
- import numpy as np
45
- from huggingface_hub import hf_hub_download, snapshot_download
46
-
47
- from ltx_core.components.diffusion_steps import EulerDiffusionStep
48
- from ltx_core.components.noisers import GaussianNoiser
49
- from ltx_core.model.audio_vae import encode_audio as vae_encode_audio
50
- from ltx_core.model.upsampler import upsample_video
51
- from ltx_core.model.video_vae import TilingConfig, get_video_chunks_number
52
- from ltx_core.quantization import QuantizationPolicy
53
- from ltx_core.types import Audio, AudioLatentShape, VideoPixelShape
54
- from ltx_pipelines.distilled import DistilledPipeline
55
- from ltx_pipelines.utils import euler_denoising_loop
56
- from ltx_pipelines.utils.args import ImageConditioningInput
57
- from ltx_pipelines.utils.constants import DISTILLED_SIGMA_VALUES, STAGE_2_DISTILLED_SIGMA_VALUES
58
- from ltx_pipelines.utils.helpers import (
59
- cleanup_memory,
60
- combined_image_conditionings,
61
- denoise_video_only,
62
- encode_prompts,
63
- simple_denoising_func,
64
- )
65
- from ltx_pipelines.utils.media_io import decode_audio_from_file, encode_video
66
- from ltx_core.loader.primitives import LoraPathStrengthAndSDOps
67
- from ltx_core.loader.sd_ops import LTXV_LORA_COMFY_RENAMING_MAP
68
-
69
- # Force-patch xformers attention into the LTX attention module.
70
- from ltx_core.model.transformer import attention as _attn_mod
71
- print(f"[ATTN] Before patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
72
- try:
73
- from xformers.ops import memory_efficient_attention as _mea
74
- _attn_mod.memory_efficient_attention = _mea
75
- print(f"[ATTN] After patch: memory_efficient_attention={_attn_mod.memory_efficient_attention}")
76
- except Exception as e:
77
- print(f"[ATTN] xformers patch FAILED: {type(e).__name__}: {e}")
78
-
79
- logging.getLogger().setLevel(logging.INFO)
80
-
81
- MAX_SEED = np.iinfo(np.int32).max
82
- DEFAULT_PROMPT = (
83
- "An astronaut hatches from a fragile egg on the surface of the Moon, "
84
- "the shell cracking and peeling apart in gentle low-gravity motion. "
85
- "Fine lunar dust lifts and drifts outward with each movement, floating "
86
- "in slow arcs before settling back onto the ground."
87
- )
88
- DEFAULT_FRAME_RATE = 24.0
89
-
90
- # Resolution presets: (width, height)
91
- RESOLUTIONS = {
92
- "high": {"16:9": (1536, 1024), "9:16": (1024, 1536), "1:1": (1024, 1024)},
93
- "low": {"16:9": (768, 512), "9:16": (512, 768), "1:1": (768, 768)},
94
- }
95
-
96
-
97
- class LTX23DistilledA2VPipeline(DistilledPipeline):
98
- """DistilledPipeline with optional audio conditioning."""
99
-
100
- def __call__(
101
- self,
102
- prompt: str,
103
- seed: int,
104
- height: int,
105
- width: int,
106
- num_frames: int,
107
- frame_rate: float,
108
- images: list[ImageConditioningInput],
109
- audio_path: str | None = None,
110
- tiling_config: TilingConfig | None = None,
111
- enhance_prompt: bool = False,
112
- ):
113
- # Standard path when no audio input is provided.
114
- print(prompt)
115
- if audio_path is None:
116
- return super().__call__(
117
- prompt=prompt,
118
- seed=seed,
119
- height=height,
120
- width=width,
121
- num_frames=num_frames,
122
- frame_rate=frame_rate,
123
- images=images,
124
- tiling_config=tiling_config,
125
- enhance_prompt=enhance_prompt,
126
- )
127
-
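- # Audio-conditioned path: encode the reference audio to latents, pad/trim them to the expected length, then run the two-stage distilled denoise with the audio latent held fixed.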
128
- generator = torch.Generator(device=self.device).manual_seed(seed)
129
- noiser = GaussianNoiser(generator=generator)
130
- stepper = EulerDiffusionStep()
131
- dtype = torch.bfloat16
132
-
133
- (ctx_p,) = encode_prompts(
134
- [prompt],
135
- self.model_ledger,
136
- enhance_first_prompt=enhance_prompt,
137
- enhance_prompt_image=images[0].path if len(images) > 0 else None,
138
- )
139
- video_context, audio_context = ctx_p.video_encoding, ctx_p.audio_encoding
140
-
141
- video_duration = num_frames / frame_rate
142
- decoded_audio = decode_audio_from_file(audio_path, self.device, 0.0, video_duration)
143
- if decoded_audio is None:
144
- raise ValueError(f"Could not extract audio stream from {audio_path}")
145
-
146
- encoded_audio_latent = vae_encode_audio(decoded_audio, self.model_ledger.audio_encoder())
147
- audio_shape = AudioLatentShape.from_duration(batch=1, duration=video_duration, channels=8, mel_bins=16)
148
- expected_frames = audio_shape.frames
149
- actual_frames = encoded_audio_latent.shape[2]
150
-
151
- if actual_frames > expected_frames:
152
- encoded_audio_latent = encoded_audio_latent[:, :, :expected_frames, :]
153
- elif actual_frames < expected_frames:
154
- pad = torch.zeros(
155
- encoded_audio_latent.shape[0],
156
- encoded_audio_latent.shape[1],
157
- expected_frames - actual_frames,
158
- encoded_audio_latent.shape[3],
159
- device=encoded_audio_latent.device,
160
- dtype=encoded_audio_latent.dtype,
161
- )
162
- encoded_audio_latent = torch.cat([encoded_audio_latent, pad], dim=2)
163
-
164
- video_encoder = self.model_ledger.video_encoder()
165
- transformer = self.model_ledger.transformer()
166
- stage_1_sigmas = torch.tensor(DISTILLED_SIGMA_VALUES, device=self.device)
167
-
168
- def denoising_loop(sigmas, video_state, audio_state, stepper):
169
- return euler_denoising_loop(
170
- sigmas=sigmas,
171
- video_state=video_state,
172
- audio_state=audio_state,
173
- stepper=stepper,
174
- denoise_fn=simple_denoising_func(
175
- video_context=video_context,
176
- audio_context=audio_context,
177
- transformer=transformer,
178
- ),
179
- )
180
-
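- # Stage 1 denoises at half resolution; the latent is then 2x-upsampled and refined at full resolution in stage 2.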
181
- stage_1_output_shape = VideoPixelShape(
182
- batch=1,
183
- frames=num_frames,
184
- width=width // 2,
185
- height=height // 2,
186
- fps=frame_rate,
187
- )
188
- stage_1_conditionings = combined_image_conditionings(
189
- images=images,
190
- height=stage_1_output_shape.height,
191
- width=stage_1_output_shape.width,
192
- video_encoder=video_encoder,
193
- dtype=dtype,
194
- device=self.device,
195
- )
196
- video_state = denoise_video_only(
197
- output_shape=stage_1_output_shape,
198
- conditionings=stage_1_conditionings,
199
- noiser=noiser,
200
- sigmas=stage_1_sigmas,
201
- stepper=stepper,
202
- denoising_loop_fn=denoising_loop,
203
- components=self.pipeline_components,
204
- dtype=dtype,
205
- device=self.device,
206
- initial_audio_latent=encoded_audio_latent,
207
- )
208
-
209
- torch.cuda.synchronize()
210
- cleanup_memory()
211
-
212
- upscaled_video_latent = upsample_video(
213
- latent=video_state.latent[:1],
214
- video_encoder=video_encoder,
215
- upsampler=self.model_ledger.spatial_upsampler(),
216
- )
217
- stage_2_sigmas = torch.tensor(STAGE_2_DISTILLED_SIGMA_VALUES, device=self.device)
218
- stage_2_output_shape = VideoPixelShape(batch=1, frames=num_frames, width=width, height=height, fps=frame_rate)
219
- stage_2_conditionings = combined_image_conditionings(
220
- images=images,
221
- height=stage_2_output_shape.height,
222
- width=stage_2_output_shape.width,
223
- video_encoder=video_encoder,
224
- dtype=dtype,
225
- device=self.device,
226
- )
227
- video_state = denoise_video_only(
228
- output_shape=stage_2_output_shape,
229
- conditionings=stage_2_conditionings,
230
- noiser=noiser,
231
- sigmas=stage_2_sigmas,
232
- stepper=stepper,
233
- denoising_loop_fn=denoising_loop,
234
- components=self.pipeline_components,
235
- dtype=dtype,
236
- device=self.device,
237
- noise_scale=stage_2_sigmas[0],
238
- initial_video_latent=upscaled_video_latent,
239
- initial_audio_latent=encoded_audio_latent,
240
- )
241
-
242
- torch.cuda.synchronize()
243
- del transformer
244
- del video_encoder
245
- cleanup_memory()
246
-
247
- decoded_video = self.model_ledger.video_decoder().decode_video(
248
- video_state.latent,
249
- tiling_config=tiling_config,
250
- generator=generator,
251
- )
252
- original_audio = Audio(
253
- waveform=decoded_audio.waveform.squeeze(0),
254
- sampling_rate=decoded_audio.sampling_rate,
255
- )
256
- return decoded_video, original_audio
257
-
258
-
259
- # Model repos
260
- LTX_MODEL_REPO = "Lightricks/LTX-2.3"
261
- GEMMA_REPO ="rahul7star/gemma-3-12b-it-heretic"
262
-
263
-
264
- # Download model checkpoints
265
- print("=" * 80)
266
- print("Downloading LTX-2.3 distilled model + Gemma...")
267
- print("=" * 80)
268
-
269
- # LoRA cache directory and currently-applied key
270
- LORA_CACHE_DIR = Path("lora_cache")
271
- LORA_CACHE_DIR.mkdir(exist_ok=True)
272
- current_lora_key: str | None = None
273
-
274
- PENDING_LORA_KEY: str | None = None
275
- PENDING_LORA_STATE: dict[str, torch.Tensor] | None = None
276
- PENDING_LORA_STATUS: str = "No LoRA state prepared yet."
277
-
278
- checkpoint_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-22b-distilled.safetensors")
279
- spatial_upsampler_path = hf_hub_download(repo_id=LTX_MODEL_REPO, filename="ltx-2.3-spatial-upscaler-x2-1.0.safetensors")
280
- gemma_root = snapshot_download(repo_id=GEMMA_REPO)
281
-
282
- # ---- Insert block (LoRA downloads) between lines 268 and 269 ----
283
- # LoRA repo + download the requested LoRA adapters
284
- LORA_REPO = "dagloop5/LoRA"
285
-
286
- print("=" * 80)
287
- print("Downloading LoRA adapters from dagloop5/LoRA...")
288
- print("=" * 80)
289
- pose_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="pose_enhancer.safetensors")
290
- general_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="ltx23__demopose_d3m0p0s3.safetensors")
291
- motion_lora_path = hf_hub_download(repo_id=LORA_REPO, filename="motion_helper.safetensors")
292
-
293
- print(f"Pose LoRA: {pose_lora_path}")
294
- print(f"General LoRA: {general_lora_path}")
295
- print(f"Motion LoRA: {motion_lora_path}")
297
-
298
- print(f"Checkpoint: {checkpoint_path}")
299
- print(f"Spatial upsampler: {spatial_upsampler_path}")
300
- print(f"Gemma root: {gemma_root}")
301
-
302
- # Initialize pipeline WITH text encoder and optional audio support
304
- pipeline = LTX23DistilledA2VPipeline(
305
- distilled_checkpoint_path=checkpoint_path,
306
- spatial_upsampler_path=spatial_upsampler_path,
307
- gemma_root=gemma_root,
308
- loras=[],
309
- quantization=QuantizationPolicy.fp8_cast(), # keep FP8 quantization unchanged
310
- )
312
-
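- # Cache key: hash of the path:strength entries for all three adapters, so each strength combination maps to a single fused state_dict on disk.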
313
- def _make_lora_key(pose_strength: float, general_strength: float, motion_strength: float) -> tuple[str, str]:
314
- rp = round(float(pose_strength), 2)
315
- rg = round(float(general_strength), 2)
316
- rm = round(float(motion_strength), 2)
317
- key_str = f"{pose_lora_path}:{rp}|{general_lora_path}:{rg}|{motion_lora_path}:{rm}"
318
- key = hashlib.sha256(key_str.encode("utf-8")).hexdigest()
319
- return key, key_str
320
-
321
-
322
- def prepare_lora_cache(
323
- pose_strength: float,
324
- general_strength: float,
325
- motion_strength: float,
326
- progress=gr.Progress(track_tqdm=True),
327
- ):
328
- """
329
- CPU-only step:
330
- - checks cache
331
- - loads cached fused transformer state_dict, or
332
- - builds fused transformer on CPU and saves it
333
- The resulting state_dict is stored in memory and can be applied later.
334
- """
335
- global PENDING_LORA_KEY, PENDING_LORA_STATE, PENDING_LORA_STATUS
336
-
337
- ledger = pipeline.model_ledger
338
- key, _ = _make_lora_key(pose_strength, general_strength, motion_strength)
339
- cache_path = LORA_CACHE_DIR / f"{key}.pt"
340
-
341
- progress(0.05, desc="Preparing LoRA state")
342
- if cache_path.exists():
343
- try:
344
- progress(0.20, desc="Loading cached fused state")
345
- state = torch.load(cache_path, map_location="cpu")
346
- PENDING_LORA_KEY = key
347
- PENDING_LORA_STATE = state
348
- PENDING_LORA_STATUS = f"Loaded cached LoRA state: {cache_path.name}"
349
- return PENDING_LORA_STATUS
350
- except Exception as e:
351
- print(f"[LoRA] Cache load failed: {type(e).__name__}: {e}")
352
-
353
- entries = [
354
- (pose_lora_path, round(float(pose_strength), 2)),
355
- (general_lora_path, round(float(general_strength), 2)),
356
- (motion_lora_path, round(float(motion_strength), 2)),
357
- ]
358
- loras_for_builder = [
359
- LoraPathStrengthAndSDOps(path, strength, LTXV_LORA_COMFY_RENAMING_MAP)
360
- for path, strength in entries
361
- if path is not None and float(strength) != 0.0
362
- ]
363
-
364
- if not loras_for_builder:
365
- PENDING_LORA_KEY = None
366
- PENDING_LORA_STATE = None
367
- PENDING_LORA_STATUS = "No non-zero LoRA strengths selected; nothing to prepare."
368
- return PENDING_LORA_STATUS
369
-
370
- tmp_ledger = None
371
- new_transformer_cpu = None
372
- try:
373
- progress(0.35, desc="Building fused CPU transformer")
374
- tmp_ledger = ledger.with_loras(tuple(loras_for_builder))
375
-
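- # Temporarily point the temporary ledger at CPU so the LoRA-fused transformer is built without consuming GPU memory.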
376
- orig_tmp_target = getattr(tmp_ledger, "_target_device", None)
377
- orig_tmp_device = getattr(tmp_ledger, "device", None)
378
- try:
379
- tmp_ledger._target_device = lambda: torch.device("cpu")
380
- tmp_ledger.device = torch.device("cpu")
381
- new_transformer_cpu = tmp_ledger.transformer()
382
- finally:
383
- if orig_tmp_target is not None:
384
- tmp_ledger._target_device = orig_tmp_target
385
- else:
386
- try:
387
- delattr(tmp_ledger, "_target_device")
388
- except Exception:
389
- pass
390
- if orig_tmp_device is not None:
391
- tmp_ledger.device = orig_tmp_device
392
- else:
393
- try:
394
- delattr(tmp_ledger, "device")
395
- except Exception:
396
- pass
397
-
398
- progress(0.70, desc="Extracting fused state_dict")
399
- state = new_transformer_cpu.state_dict()
400
- torch.save(state, cache_path)
401
-
402
- PENDING_LORA_KEY = key
403
- PENDING_LORA_STATE = state
404
- PENDING_LORA_STATUS = f"Built and cached LoRA state: {cache_path.name}"
405
- return PENDING_LORA_STATUS
406
-
407
- except Exception as e:
408
- import traceback
409
- print(f"[LoRA] Prepare failed: {type(e).__name__}: {e}")
410
- print(traceback.format_exc())
411
- PENDING_LORA_KEY = None
412
- PENDING_LORA_STATE = None
413
- PENDING_LORA_STATUS = f"LoRA prepare failed: {type(e).__name__}: {e}"
414
- return PENDING_LORA_STATUS
415
-
416
- finally:
417
- try:
418
- del new_transformer_cpu
419
- except Exception:
420
- pass
421
- try:
422
- del tmp_ledger
423
- except Exception:
424
- pass
425
- gc.collect()
426
-
427
-
428
- def apply_prepared_lora_state_to_pipeline():
429
- """
430
- Fast step: copy the already prepared CPU state into the live transformer.
431
- This is the only part that should remain near generation time.
432
- """
433
- global current_lora_key, PENDING_LORA_KEY, PENDING_LORA_STATE
434
-
435
- if PENDING_LORA_STATE is None or PENDING_LORA_KEY is None:
436
- print("[LoRA] No prepared LoRA state available; skipping.")
437
- return False
438
-
439
- if current_lora_key == PENDING_LORA_KEY:
440
- print("[LoRA] Prepared LoRA state already active; skipping.")
441
- return True
442
-
443
- existing_transformer = _transformer
444
- existing_params = {name: param for name, param in existing_transformer.named_parameters()}
445
- existing_buffers = {name: buf for name, buf in existing_transformer.named_buffers()}
446
-
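- # Copy the fused CPU weights tensor-by-tensor into the live transformer so it is updated in place, without rebuilding or reloading the model.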
447
- with torch.no_grad():
448
- for k, v in PENDING_LORA_STATE.items():
449
- if k in existing_params:
450
- existing_params[k].data.copy_(v.to(existing_params[k].device))
451
- elif k in existing_buffers:
452
- existing_buffers[k].data.copy_(v.to(existing_buffers[k].device))
453
-
454
- current_lora_key = PENDING_LORA_KEY
455
- print("[LoRA] Prepared LoRA state applied to the pipeline.")
456
- return True
457
-
459
- # Preload all models for ZeroGPU tensor packing.
460
- print("Preloading all models (including Gemma and audio components)...")
461
- ledger = pipeline.model_ledger
462
-
463
- # Save the original factory methods so we can rebuild individual components later.
464
- # These are bound callables on ledger that will call the builder when invoked.
465
- _orig_transformer_factory = ledger.transformer
466
- _orig_video_encoder_factory = ledger.video_encoder
467
- _orig_video_decoder_factory = ledger.video_decoder
468
- _orig_audio_encoder_factory = ledger.audio_encoder
469
- _orig_audio_decoder_factory = ledger.audio_decoder
470
- _orig_vocoder_factory = ledger.vocoder
471
- _orig_spatial_upsampler_factory = ledger.spatial_upsampler
472
- _orig_text_encoder_factory = ledger.text_encoder
473
- _orig_gemma_embeddings_factory = ledger.gemma_embeddings_processor
474
-
475
- # Call the original factories once to create the cached instances we will serve by default.
476
- _transformer = _orig_transformer_factory()
477
- _video_encoder = _orig_video_encoder_factory()
478
- _video_decoder = _orig_video_decoder_factory()
479
- _audio_encoder = _orig_audio_encoder_factory()
480
- _audio_decoder = _orig_audio_decoder_factory()
481
- _vocoder = _orig_vocoder_factory()
482
- _spatial_upsampler = _orig_spatial_upsampler_factory()
483
- _text_encoder = _orig_text_encoder_factory()
484
- _embeddings_processor = _orig_gemma_embeddings_factory()
485
-
486
- # Replace ledger methods with lightweight lambdas that return the cached instances.
487
- # We keep the original factories above so we can call them later to rebuild components.
488
- ledger.transformer = lambda: _transformer
489
- ledger.video_encoder = lambda: _video_encoder
490
- ledger.video_decoder = lambda: _video_decoder
491
- ledger.audio_encoder = lambda: _audio_encoder
492
- ledger.audio_decoder = lambda: _audio_decoder
493
- ledger.vocoder = lambda: _vocoder
494
- ledger.spatial_upsampler = lambda: _spatial_upsampler
495
- ledger.text_encoder = lambda: _text_encoder
496
- ledger.gemma_embeddings_processor = lambda: _embeddings_processor
497
-
498
- print("All models preloaded (including Gemma text encoder and audio encoder)!")
500
-
501
- print("=" * 80)
502
- print("Pipeline ready!")
503
- print("=" * 80)
504
-
505
-
506
- def log_memory(tag: str):
507
- if torch.cuda.is_available():
508
- allocated = torch.cuda.memory_allocated() / 1024**3
509
- peak = torch.cuda.max_memory_allocated() / 1024**3
510
- free, total = torch.cuda.mem_get_info()
511
- print(f"[VRAM {tag}] allocated={allocated:.2f}GB peak={peak:.2f}GB free={free / 1024**3:.2f}GB total={total / 1024**3:.2f}GB")
512
-
513
-
514
- def detect_aspect_ratio(image) -> str:
515
- if image is None:
516
- return "16:9"
517
- if hasattr(image, "size"):
518
- w, h = image.size
519
- elif hasattr(image, "shape"):
520
- h, w = image.shape[:2]
521
- else:
522
- return "16:9"
523
- ratio = w / h
524
- candidates = {"16:9": 16 / 9, "9:16": 9 / 16, "1:1": 1.0}
525
- return min(candidates, key=lambda k: abs(ratio - candidates[k]))
526
-
527
-
528
- def on_image_upload(first_image, last_image, high_res):
529
- ref_image = first_image if first_image is not None else last_image
530
- aspect = detect_aspect_ratio(ref_image)
531
- tier = "high" if high_res else "low"
532
- w, h = RESOLUTIONS[tier][aspect]
533
- return gr.update(value=w), gr.update(value=h)
534
-
535
-
536
- def on_highres_toggle(first_image, last_image, high_res):
537
- ref_image = first_image if first_image is not None else last_image
538
- aspect = detect_aspect_ratio(ref_image)
539
- tier = "high" if high_res else "low"
540
- w, h = RESOLUTIONS[tier][aspect]
541
- return gr.update(value=w), gr.update(value=h)
542
-
543
-
544
- def get_gpu_duration(
545
- first_image,
546
- last_image,
547
- input_audio,
548
- prompt: str,
549
- duration: float,
550
- gpu_duration: float,
551
- enhance_prompt: bool = True,
552
- seed: int = 42,
553
- randomize_seed: bool = True,
554
- height: int = 1024,
555
- width: int = 1536,
556
- pose_strength: float = 0.0,
557
- general_strength: float = 0.0,
558
- motion_strength: float = 0.0,
559
- progress=None,
560
- ):
561
- return int(gpu_duration)
562
-
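- # spaces.GPU accepts a callable for duration; get_gpu_duration receives the same inputs as generate_video and returns the user-selected gpu_duration in seconds.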
563
- @spaces.GPU(duration=get_gpu_duration)
564
- @torch.inference_mode()
565
- def generate_video(
566
- first_image,
567
- last_image,
568
- input_audio,
569
- prompt: str,
570
- duration: float,
571
- gpu_duration: float,
572
- enhance_prompt: bool = True,
573
- seed: int = 42,
574
- randomize_seed: bool = True,
575
- height: int = 1024,
576
- width: int = 1536,
577
- pose_strength: float = 0.0,
578
- general_strength: float = 0.0,
579
- motion_strength: float = 0.0,
580
- progress=gr.Progress(track_tqdm=True),
581
- ):
582
- try:
583
- torch.cuda.reset_peak_memory_stats()
584
- log_memory("start")
585
-
586
- current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
587
-
588
- frame_rate = DEFAULT_FRAME_RATE
589
- num_frames = int(duration * frame_rate) + 1
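- # Round up to the nearest valid frame count of the form 8*k + 1, which the pipeline expects.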
590
- num_frames = ((num_frames - 1 + 7) // 8) * 8 + 1
591
-
592
- print(f"Generating: {height}x{width}, {num_frames} frames ({duration}s), seed={current_seed}")
593
-
594
- images = []
595
- output_dir = Path("outputs")
596
- output_dir.mkdir(exist_ok=True)
597
-
598
- if first_image is not None:
599
- temp_first_path = output_dir / f"temp_first_{current_seed}.jpg"
600
- if hasattr(first_image, "save"):
601
- first_image.save(temp_first_path)
602
- else:
603
- temp_first_path = Path(first_image)
604
- images.append(ImageConditioningInput(path=str(temp_first_path), frame_idx=0, strength=1.0))
605
-
606
- if last_image is not None:
607
- temp_last_path = output_dir / f"temp_last_{current_seed}.jpg"
608
- if hasattr(last_image, "save"):
609
- last_image.save(temp_last_path)
610
- else:
611
- temp_last_path = Path(last_image)
612
- images.append(ImageConditioningInput(path=str(temp_last_path), frame_idx=num_frames - 1, strength=1.0))
613
-
614
- tiling_config = TilingConfig.default()
615
- video_chunks_number = get_video_chunks_number(num_frames, tiling_config)
616
-
617
- log_memory("before pipeline call")
618
-
619
- apply_prepared_lora_state_to_pipeline()
620
-
621
- video, audio = pipeline(
622
- prompt=prompt,
623
- seed=current_seed,
624
- height=int(height),
625
- width=int(width),
626
- num_frames=num_frames,
627
- frame_rate=frame_rate,
628
- images=images,
629
- audio_path=input_audio,
630
- tiling_config=tiling_config,
631
- enhance_prompt=enhance_prompt,
632
- )
633
-
634
- log_memory("after pipeline call")
635
-
636
- output_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name  # avoid deprecated/racy tempfile.mktemp
637
- encode_video(
638
- video=video,
639
- fps=frame_rate,
640
- audio=audio,
641
- output_path=output_path,
642
- video_chunks_number=video_chunks_number,
643
- )
644
-
645
- log_memory("after encode_video")
646
- return str(output_path), current_seed
647
-
648
- except Exception as e:
649
- import traceback
650
- log_memory("on error")
651
- print(f"Error: {str(e)}\n{traceback.format_exc()}")
652
- return None, seed  # current_seed may be unbound if the failure happened before it was drawn
653
-
654
-
- css = """
- .fillable{max-width: 1200px !important}
- """
-
- with gr.Blocks(title="LTX-2.3 Heretic Distilled", theme=gr.themes.Citrus(), css=css) as demo:
656
- gr.Markdown("# LTX-2.3 F2LF:Heretic with Fast Audio-Video Generation with Frame Conditioning")
657
-
658
-
659
- with gr.Row():
660
- with gr.Column():
661
- with gr.Row():
662
- first_image = gr.Image(label="First Frame (Optional)", type="pil")
663
- last_image = gr.Image(label="Last Frame (Optional)", type="pil")
664
- input_audio = gr.Audio(label="Audio Input (Optional)", type="filepath")
665
- prompt = gr.Textbox(
666
- label="Prompt",
667
- info="for best results - make it as elaborate as possible",
668
- value="Make this image come alive with cinematic motion, smooth animation",
669
- lines=3,
670
- placeholder="Describe the motion and animation you want...",
671
- )
672
- duration = gr.Slider(label="Duration (seconds)", minimum=1.0, maximum=30.0, value=10.0, step=0.1)
673
-
674
-
675
- generate_btn = gr.Button("Generate Video", variant="primary", size="lg")
676
-
677
- with gr.Accordion("Advanced Settings", open=False):
678
- seed = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, value=10, step=1)
679
- randomize_seed = gr.Checkbox(label="Randomize Seed", value=True)
680
- with gr.Row():
681
- width = gr.Number(label="Width", value=1536, precision=0)
682
- height = gr.Number(label="Height", value=1024, precision=0)
683
- with gr.Row():
684
- enhance_prompt = gr.Checkbox(label="Enhance Prompt", value=False)
685
- high_res = gr.Checkbox(label="High Resolution", value=True)
686
- with gr.Column():
687
- gr.Markdown("### LoRA adapter strengths (set to 0 to disable)")
688
- pose_strength = gr.Slider(
689
- label="Pose Enhancer strength",
690
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
691
- )
692
- general_strength = gr.Slider(
693
- label="General Enhancer strength",
694
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
695
- )
696
- motion_strength = gr.Slider(
697
- label="Motion Helper strength",
698
- minimum=0.0, maximum=2.0, value=0.0, step=0.01
699
- )
700
- prepare_lora_btn = gr.Button("Prepare / Load LoRA Cache", variant="secondary")
701
- lora_status = gr.Textbox(
702
- label="LoRA Cache Status",
703
- value="No LoRA state prepared yet.",
704
- interactive=False,
705
- )
706
-
707
- with gr.Column():
708
- output_video = gr.Video(label="Generated Video", autoplay=False)
709
- gpu_duration = gr.Slider(
710
- label="ZeroGPU duration (seconds)",
711
- minimum=40.0,
712
- maximum=240.0,
713
- value=85.0,
714
- step=1.0,
715
- )
716
-
717
- gr.Examples(
718
- examples=[
719
- [
720
- None,
721
- "pinkknit.jpg",
722
- None,
723
- "The camera falls downward through darkness as if dropped into a tunnel. "
724
- "As it slows, five friends wearing pink knitted hats and sunglasses lean "
725
- "over and look down toward the camera with curious expressions. The lens "
726
- "has a strong fisheye effect, creating a circular frame around them. They "
727
- "crowd together closely, forming a symmetrical cluster while staring "
728
- "directly into the lens.",
729
- 3.0,
730
- 80.0,
731
- False,
732
- 42,
733
- True,
734
- 1024,
735
- 1024,
736
- 0.0, # pose_strength (example)
737
- 0.0, # general_strength (example)
738
- 0.0, # motion_strength (example)
739
- ],
740
- ],
741
- inputs=[
742
- first_image, last_image, input_audio, prompt, duration, gpu_duration,
743
- enhance_prompt, seed, randomize_seed, height, width,
744
- pose_strength, general_strength, motion_strength,
745
- ],
746
- )
747
-
748
- first_image.change(
749
- fn=on_image_upload,
750
- inputs=[first_image, last_image, high_res],
751
- outputs=[width, height],
752
- )
753
-
754
- last_image.change(
755
- fn=on_image_upload,
756
- inputs=[first_image, last_image, high_res],
757
- outputs=[width, height],
758
- )
759
-
760
- high_res.change(
761
- fn=on_highres_toggle,
762
- inputs=[first_image, last_image, high_res],
763
- outputs=[width, height],
764
- )
765
-
766
- prepare_lora_btn.click(
767
- fn=prepare_lora_cache,
768
- inputs=[pose_strength, general_strength, motion_strength],
769
- outputs=[lora_status],
770
- )
771
-
772
- generate_btn.click(
773
- fn=generate_video,
774
- inputs=[
775
- first_image, last_image, input_audio, prompt, duration, gpu_duration, enhance_prompt,
776
- seed, randomize_seed, height, width,
777
- pose_strength, general_strength, motion_strength,
778
- ],
779
- outputs=[output_video, seed],
780
- )
781
-
782
-
- if __name__ == "__main__":
- demo.launch()