Fabrice-TIERCELIN committed on
Commit
f7b089b
·
verified ·
1 Parent(s): eb6ac03

Delete app_v2v.py

Browse files
Files changed (1) hide show
  1. app_v2v.py +0 -1136
app_v2v.py DELETED
@@ -1,1136 +0,0 @@
1
- from diffusers_helper.hf_login import login
2
-
3
- import os
4
-
5
- os.environ['HF_HOME'] = os.path.abspath(os.path.realpath(os.path.join(os.path.dirname(__file__), './hf_download')))
6
-
7
- import spaces
8
- import gradio as gr
9
- import torch
10
- import traceback
11
- import einops
12
- import safetensors.torch as sf
13
- import numpy as np
14
- import argparse
15
- import random
16
- import math
17
- # 20250506 pftq: Added for video input loading
18
- import decord
19
- # 20250506 pftq: Added for progress bars in video_encode
20
- from tqdm import tqdm
21
- # 20250506 pftq: Normalize file paths for Windows compatibility
22
- import pathlib
23
- # 20250506 pftq: for easier to read timestamp
24
- from datetime import datetime
25
- # 20250508 pftq: for saving prompt to mp4 comments metadata
26
- import imageio_ffmpeg
27
- import tempfile
28
- import shutil
29
- import subprocess
30
-
31
- from PIL import Image
32
- from diffusers import AutoencoderKLHunyuanVideo
33
- from transformers import LlamaModel, CLIPTextModel, LlamaTokenizerFast, CLIPTokenizer
34
- from diffusers_helper.hunyuan import encode_prompt_conds, vae_decode, vae_encode, vae_decode_fake
35
- from diffusers_helper.utils import save_bcthw_as_mp4, crop_or_pad_yield_mask, soft_append_bcthw, resize_and_center_crop, state_dict_weighted_merge, state_dict_offset_merge, generate_timestamp
36
- from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked
37
- from diffusers_helper.pipelines.k_diffusion_hunyuan import sample_hunyuan
38
- from diffusers_helper.memory import cpu, gpu, get_cuda_free_memory_gb, move_model_to_device_with_memory_preservation, offload_model_from_device_for_memory_preservation, fake_diffusers_current_device, DynamicSwapInstaller, unload_complete_models, load_model_as_complete
39
- from diffusers_helper.thread_utils import AsyncStream, async_run
40
- from diffusers_helper.gradio.progress_bar import make_progress_bar_css, make_progress_bar_html
41
- from transformers import SiglipImageProcessor, SiglipVisionModel
42
- from diffusers_helper.clip_vision import hf_clip_vision_encode
43
- from diffusers_helper.bucket_tools import find_nearest_bucket
44
- from diffusers import BitsAndBytesConfig as DiffusersBitsAndBytesConfig, HunyuanVideoTransformer3DModel, HunyuanVideoPipeline
45
-
46
# All heavy initialization is gated on CUDA being present: on a CPU-only
# machine nothing below the check runs, and globals such as `high_vram`,
# `text_encoder`, `vae`, `transformer` stay undefined.  That is safe only
# because `process` refuses to start a job when
# `torch.cuda.device_count() == 0`, so `worker` (which reads these globals)
# is never reached on CPU.
# NOTE(review): indentation reconstructed from a diff rendering — confirm
# against the original file exactly which statements sit inside this `if`.
if torch.cuda.device_count() > 0:
    free_mem_gb = get_cuda_free_memory_gb(gpu)
    # Above ~60 GB of free VRAM, all models stay resident on the GPU;
    # otherwise they are swapped in and out on demand per pipeline stage.
    high_vram = free_mem_gb > 60

    print(f'Free VRAM {free_mem_gb} GB')
    print(f'High-VRAM Mode: {high_vram}')

    # Text encoders, tokenizers and VAE from the HunyuanVideo community port;
    # loaded on CPU first, placement decided below based on `high_vram`.
    text_encoder = LlamaModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder', torch_dtype=torch.float16).cpu()
    text_encoder_2 = CLIPTextModel.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='text_encoder_2', torch_dtype=torch.float16).cpu()
    tokenizer = LlamaTokenizerFast.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer')
    tokenizer_2 = CLIPTokenizer.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='tokenizer_2')
    vae = AutoencoderKLHunyuanVideo.from_pretrained("hunyuanvideo-community/HunyuanVideo", subfolder='vae', torch_dtype=torch.float16).cpu()

    # SigLIP image encoder used for CLIP-vision conditioning of the sampler.
    feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='feature_extractor')
    image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()

    # FramePack F1 image-to-video transformer.
    transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('lllyasviel/FramePack_F1_I2V_HY_20250503', torch_dtype=torch.bfloat16).cpu()

    # Inference-only: eval mode and no gradients for every model.
    vae.eval()
    text_encoder.eval()
    text_encoder_2.eval()
    image_encoder.eval()
    transformer.eval()

    if not high_vram:
        # Reduce VAE peak memory in low-VRAM mode.
        vae.enable_slicing()
        vae.enable_tiling()

    transformer.high_quality_fp32_output_for_inference = True
    print('transformer.high_quality_fp32_output_for_inference = True')

    # Per-model working dtypes: bf16 transformer, fp16 everything else.
    transformer.to(dtype=torch.bfloat16)
    vae.to(dtype=torch.float16)
    image_encoder.to(dtype=torch.float16)
    text_encoder.to(dtype=torch.float16)
    text_encoder_2.to(dtype=torch.float16)

    vae.requires_grad_(False)
    text_encoder.requires_grad_(False)
    text_encoder_2.requires_grad_(False)
    image_encoder.requires_grad_(False)
    transformer.requires_grad_(False)

    if not high_vram:
        # DynamicSwapInstaller is same as huggingface's enable_sequential_offload but 3x faster
        DynamicSwapInstaller.install_model(transformer, device=gpu)
        DynamicSwapInstaller.install_model(text_encoder, device=gpu)
    else:
        # Plenty of VRAM: keep everything on the GPU permanently.
        text_encoder.to(gpu)
        text_encoder_2.to(gpu)
        image_encoder.to(gpu)
        vae.to(gpu)
        transformer.to(gpu)

# Queue pair used by `worker` (producer) and `process` (consumer);
# `process` replaces this with a fresh AsyncStream per job.
stream = AsyncStream()

outputs_folder = './outputs/'
os.makedirs(outputs_folder, exist_ok=True)

# Debug overrides consumed (and reset) by `process`/`get_duration`.
input_image_debug_value = input_video_debug_value = prompt_debug_value = total_second_length_debug_value = None
106
-
107
@spaces.GPU()
@torch.no_grad()
def video_encode(video_path, resolution, no_resize, vae, vae_batch_size=16, device="cuda", width=None, height=None):
    """
    Encode a video into latent representations using the VAE.

    Args:
        video_path: Path to the input video file.
        resolution: Bucket resolution used when `no_resize` is False.
        no_resize: If True, keep the native resolution instead of snapping
            to the nearest training bucket.
        vae: AutoencoderKLHunyuanVideo model.
        vae_batch_size: Number of frames to process per batch.
        device: Device for computation (e.g., "cuda"); falls back to "cpu"
            when CUDA is unavailable.
        height, width: Target resolution for resizing frames; defaults to
            the video's native size when None.

    Returns:
        start_latent: Latent of the first frame (for compatibility with original code).
        input_image_np: First frame as numpy array (for CLIP vision encoding).
        history_latents: Latents of all frames (shape: [1, channels, frames, height//8, width//8]).
        fps: Frames per second of the input video.
        target_height, target_width: Final pixel resolution actually used.
        input_video_pixels: Processed frames as a CPU tensor laid out
            (1, channels, frames, height, width), normalized to [-1, 1].

    Raises:
        Re-raises any exception from decoding/encoding after logging it.
    """
    # 20250506 pftq: Normalize video path for Windows compatibility
    video_path = str(pathlib.Path(video_path).resolve())
    print(f"Processing video: {video_path}")

    # 20250506 pftq: Check CUDA availability and fallback to CPU if needed
    if device == "cuda" and not torch.cuda.is_available():
        print("CUDA is not available, falling back to CPU")
        device = "cpu"

    try:
        # 20250506 pftq: Load video and get FPS
        print("Initializing VideoReader...")
        vr = decord.VideoReader(video_path)
        fps = vr.get_avg_fps()  # Get input video FPS
        num_real_frames = len(vr)
        print(f"Video loaded: {num_real_frames} frames, FPS: {fps}")

        # Truncate to nearest latent size (multiple of 4)
        latent_size_factor = 4
        num_frames = (num_real_frames // latent_size_factor) * latent_size_factor
        if num_frames != num_real_frames:
            print(f"Truncating video from {num_real_frames} to {num_frames} frames for latent size compatibility")
        # No-op when the count was already a multiple of 4.
        num_real_frames = num_frames

        # 20250506 pftq: Read frames
        print("Reading video frames...")
        frames = vr.get_batch(range(num_real_frames)).asnumpy()  # Shape: (num_real_frames, height, width, channels)
        print(f"Frames read: {frames.shape}")

        # 20250506 pftq: Get native video resolution
        native_height, native_width = frames.shape[1], frames.shape[2]
        print(f"Native video resolution: {native_width}x{native_height}")

        # 20250506 pftq: Use native resolution if height/width not specified, otherwise use provided values
        target_height = native_height if height is None else height
        target_width = native_width if width is None else width

        # 20250506 pftq: Adjust to nearest bucket for model compatibility
        if not no_resize:
            target_height, target_width = find_nearest_bucket(target_height, target_width, resolution=resolution)
            print(f"Adjusted resolution: {target_width}x{target_height}")
        else:
            print(f"Using native resolution without resizing: {target_width}x{target_height}")

        # 20250506 pftq: Preprocess frames to match original image processing
        processed_frames = []
        for i, frame in enumerate(frames):
            #print(f"Preprocessing frame {i+1}/{num_frames}")
            frame_np = resize_and_center_crop(frame, target_width=target_width, target_height=target_height)
            processed_frames.append(frame_np)
        processed_frames = np.stack(processed_frames)  # Shape: (num_real_frames, height, width, channels)
        print(f"Frames preprocessed: {processed_frames.shape}")

        # 20250506 pftq: Save first frame for CLIP vision encoding
        input_image_np = processed_frames[0]

        # 20250506 pftq: Convert to tensor and normalize to [-1, 1]
        print("Converting frames to tensor...")
        frames_pt = torch.from_numpy(processed_frames).float() / 127.5 - 1
        frames_pt = frames_pt.permute(0, 3, 1, 2)  # Shape: (num_real_frames, channels, height, width)
        frames_pt = frames_pt.unsqueeze(0)  # Shape: (1, num_real_frames, channels, height, width)
        frames_pt = frames_pt.permute(0, 2, 1, 3, 4)  # Shape: (1, channels, num_real_frames, height, width)
        print(f"Tensor shape: {frames_pt.shape}")

        # 20250507 pftq: Save pixel frames for use in worker
        input_video_pixels = frames_pt.cpu()

        # 20250506 pftq: Move to device
        print(f"Moving tensor to device: {device}")
        frames_pt = frames_pt.to(device)
        print("Tensor moved to device")

        # 20250506 pftq: Move VAE to device
        print(f"Moving VAE to device: {device}")
        vae.to(device)
        print("VAE moved to device")

        # 20250506 pftq: Encode frames in batches
        print(f"Encoding input video frames in VAE batch size {vae_batch_size} (reduce if memory issues here or if forcing video resolution)")
        latents = []
        vae.eval()
        # Redundant with the @torch.no_grad() decorator above, but harmless.
        with torch.no_grad():
            for i in tqdm(range(0, frames_pt.shape[2], vae_batch_size), desc="Encoding video frames", mininterval=0.1):
                #print(f"Encoding batch {i//vae_batch_size + 1}: frames {i} to {min(i + vae_batch_size, frames_pt.shape[2])}")
                batch = frames_pt[:, :, i:i + vae_batch_size]  # Shape: (1, channels, batch_size, height, width)
                try:
                    # 20250506 pftq: Log GPU memory before encoding
                    if device == "cuda":
                        free_mem = torch.cuda.memory_allocated() / 1024**3
                        #print(f"GPU memory before encoding: {free_mem:.2f} GB")
                    batch_latent = vae_encode(batch, vae)
                    # 20250506 pftq: Synchronize CUDA to catch issues
                    if device == "cuda":
                        torch.cuda.synchronize()
                        #print(f"GPU memory after encoding: {torch.cuda.memory_allocated() / 1024**3:.2f} GB")
                    latents.append(batch_latent)
                    #print(f"Batch encoded, latent shape: {batch_latent.shape}")
                except RuntimeError as e:
                    # Log, add an OOM hint when applicable, and re-raise to the
                    # outer handler.
                    print(f"Error during VAE encoding: {str(e)}")
                    if device == "cuda" and "out of memory" in str(e).lower():
                        print("CUDA out of memory, try reducing vae_batch_size or using CPU")
                    raise

        # 20250506 pftq: Concatenate latents
        print("Concatenating latents...")
        history_latents = torch.cat(latents, dim=2)  # Shape: (1, channels, frames, height//8, width//8)
        print(f"History latents shape: {history_latents.shape}")

        # 20250506 pftq: Get first frame's latent
        start_latent = history_latents[:, :, :1]  # Shape: (1, channels, 1, height//8, width//8)
        print(f"Start latent shape: {start_latent.shape}")

        # 20250506 pftq: Move VAE back to CPU to free GPU memory
        if device == "cuda":
            vae.to(cpu)
            torch.cuda.empty_cache()
            print("VAE moved back to CPU, CUDA cache cleared")

        return start_latent, input_image_np, history_latents, fps, target_height, target_width, input_video_pixels

    except Exception as e:
        print(f"Error in video_encode: {str(e)}")
        raise
249
-
250
- # 20250508 pftq: for saving prompt to mp4 metadata comments
251
# 20250508 pftq: for saving prompt to mp4 metadata comments
def set_mp4_comments_imageio_ffmpeg(input_file, comments):
    """Write *comments* into the MP4 ``comment`` metadata tag of *input_file*.

    Uses the FFmpeg binary bundled with imageio-ffmpeg and copies both the
    video and audio streams (no re-encode).  The original file is replaced
    via a temporary file only when FFmpeg succeeds.

    Args:
        input_file: Path to an existing MP4 file, modified in place.
        comments: Text stored in the container's ``comment`` metadata field.

    Returns:
        True on success, False on any failure (missing input, FFmpeg error,
        or any unexpected exception).
    """
    temp_file = None
    try:
        # Get the path to the bundled FFmpeg binary from imageio-ffmpeg
        ffmpeg_path = imageio_ffmpeg.get_ffmpeg_exe()

        # Check if input file exists
        if not os.path.exists(input_file):
            print(f"Error: Input file {input_file} does not exist")
            return False

        # Create a temporary output path.  mkstemp hands back an open file
        # descriptor; close it immediately so FFmpeg can overwrite the path.
        # (The previous NamedTemporaryFile(delete=False) left its handle open
        # until garbage collection, which breaks the overwrite on Windows.)
        fd, temp_file = tempfile.mkstemp(suffix='.mp4')
        os.close(fd)

        # FFmpeg command using the bundled binary
        command = [
            ffmpeg_path,  # Use imageio-ffmpeg's FFmpeg
            '-i', input_file,  # input file
            '-metadata', f'comment={comments}',  # set comment metadata
            '-c:v', 'copy',  # copy video stream without re-encoding
            '-c:a', 'copy',  # copy audio stream without re-encoding
            '-y',  # overwrite output file if it exists
            temp_file  # temporary output file
        ]

        # Run the FFmpeg command
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True)

        if result.returncode == 0:
            # Replace the original file with the modified one
            shutil.move(temp_file, input_file)
            print(f"Successfully added comments to {input_file}")
            return True
        else:
            # Clean up temp file if FFmpeg fails
            if os.path.exists(temp_file):
                os.remove(temp_file)
            print(f"Error: FFmpeg failed with message:\n{result.stderr}")
            return False

    except Exception as e:
        # Clean up temp file in case of other errors; the explicit sentinel
        # replaces the previous fragile `'temp_file' in locals()` check.
        if temp_file is not None and os.path.exists(temp_file):
            os.remove(temp_file)
        print(f"Error saving prompt to video metadata, ffmpeg may be required: "+str(e))
        return False
296
-
297
@torch.no_grad()
def worker(input_image, prompts, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
    """Background image-to-video generation job (FramePack F1 forward sampling).

    Runs on the thread started by `process` and communicates with the UI
    exclusively through the global `stream` queues using ('progress', ...),
    ('file', path) and ('end', None) messages.

    Args:
        input_image: Input frame as an HxWxC uint8 numpy array.
        prompts: List of prompt strings; one is consumed per section and the
            last encoded set keeps being used once the list is exhausted.
        n_prompt: Negative prompt (only encoded when cfg != 1).
        seed: Seed for the CPU torch.Generator driving sampling.
        total_second_length: Requested video length in seconds at 30 FPS.
        latent_window_size: Latent frames generated per section.
        steps: Denoising steps per section.
        cfg, gs, rs: Real CFG scale, distilled guidance scale, guidance rescale.
        gpu_memory_preservation: GB of VRAM kept free when moving the
            transformer to the GPU in low-VRAM mode.
        use_teacache: Enable TeaCache acceleration in the transformer.
        mp4_crf: CRF quality passed to the MP4 writer.
    """
    def encode_prompt(prompt, n_prompt):
        # Encode one positive/negative prompt pair with both text encoders.
        llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)

        if cfg == 1:
            # CFG disabled: the negative branch is unused, zeros suffice.
            llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
        else:
            llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)

        # Pad/crop to a fixed 512-token length and obtain attention masks.
        llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
        llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)

        llama_vec = llama_vec.to(transformer.dtype)
        llama_vec_n = llama_vec_n.to(transformer.dtype)
        clip_l_pooler = clip_l_pooler.to(transformer.dtype)
        clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
        return [llama_vec, clip_l_pooler, llama_vec_n, clip_l_pooler_n, llama_attention_mask, llama_attention_mask_n]

    # Each section contributes latent_window_size * 4 - 3 pixel frames at 30 FPS.
    total_latent_sections = (total_second_length * 30) / (latent_window_size * 4)
    total_latent_sections = int(max(round(total_latent_sections), 1))

    job_id = generate_timestamp()

    stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))

    try:
        # Clean GPU
        if not high_vram:
            unload_complete_models(
                text_encoder, text_encoder_2, image_encoder, vae, transformer
            )

        # Text encoding

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))

        if not high_vram:
            fake_diffusers_current_device(text_encoder, gpu)  # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
            load_model_as_complete(text_encoder_2, target_device=gpu)

        # One encoded parameter set per prompt; consumed one-per-section below.
        prompt_parameters = []

        for prompt_part in prompts:
            prompt_parameters.append(encode_prompt(prompt_part, n_prompt))

        # Processing input image

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Image processing ...'))))

        H, W, C = input_image.shape
        height, width = find_nearest_bucket(H, W, resolution=640)
        input_image_np = resize_and_center_crop(input_image, target_width=width, target_height=height)

        Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))

        # Normalize uint8 [0, 255] to [-1, 1] and lay out as (1, C, 1, H, W).
        input_image_pt = torch.from_numpy(input_image_np).float() / 127.5 - 1
        input_image_pt = input_image_pt.permute(2, 0, 1)[None, :, None]

        # VAE encoding

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'VAE encoding ...'))))

        if not high_vram:
            load_model_as_complete(vae, target_device=gpu)

        start_latent = vae_encode(input_image_pt, vae)

        # CLIP Vision

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))

        if not high_vram:
            load_model_as_complete(image_encoder, target_device=gpu)

        image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
        image_encoder_last_hidden_state = image_encoder_output.last_hidden_state

        # Dtype

        image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)

        # Sampling

        stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))

        rnd = torch.Generator("cpu").manual_seed(seed)

        # Zero-initialized context buffer sized 16 + 2 + 1 for the 4x/2x/1x
        # clean-latent slots; the start-frame latent is appended right after.
        history_latents = torch.zeros(size=(1, 16, 16 + 2 + 1, height // 8, width // 8), dtype=torch.float32).cpu()
        history_pixels = None

        history_latents = torch.cat([history_latents, start_latent.to(history_latents)], dim=2)
        total_generated_latent_frames = 1

        for section_index in range(total_latent_sections):
            # Honor a pending cancel request between sections.
            if stream.input_queue.top() == 'end':
                stream.output_queue.push(('end', None))
                return

            print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')

            # Advance to the next prompt's embeddings while any remain.
            if len(prompt_parameters) > 0:
                [llama_vec, clip_l_pooler, llama_vec_n, clip_l_pooler_n, llama_attention_mask, llama_attention_mask_n] = prompt_parameters.pop(0)

            if not high_vram:
                unload_complete_models()
                move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)

            if use_teacache:
                transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
            else:
                transformer.initialize_teacache(enable_teacache=False)

            def callback(d):
                # Per-step progress callback: pushes a preview image and, on a
                # pending 'end' request, aborts sampling via KeyboardInterrupt
                # (caught by the bare except at the bottom of this function).
                preview = d['denoised']
                preview = vae_decode_fake(preview)

                preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
                preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')

                if stream.input_queue.top() == 'end':
                    stream.output_queue.push(('end', None))
                    raise KeyboardInterrupt('User ends the task.')

                current_step = d['i'] + 1
                percentage = int(100.0 * current_step / steps)
                hint = f'Sampling {current_step}/{steps}'
                desc = f'Total generated frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / 30) :.2f} seconds (FPS-30). The video is being extended now ...'
                stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
                return

            # F1 index layout: [start frame | 16 4x | 2 2x | 1 1x | new window].
            indices = torch.arange(0, sum([1, 16, 2, 1, latent_window_size])).unsqueeze(0)
            clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split([1, 16, 2, 1, latent_window_size], dim=1)
            clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)

            # Context latents come from the tail of the running history.
            clean_latents_4x, clean_latents_2x, clean_latents_1x = history_latents[:, :, -sum([16, 2, 1]):, :, :].split([16, 2, 1], dim=2)
            clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)

            generated_latents = sample_hunyuan(
                transformer=transformer,
                sampler='unipc',
                width=width,
                height=height,
                frames=latent_window_size * 4 - 3,
                real_guidance_scale=cfg,
                distilled_guidance_scale=gs,
                guidance_rescale=rs,
                # shift=3.0,
                num_inference_steps=steps,
                generator=rnd,
                prompt_embeds=llama_vec,
                prompt_embeds_mask=llama_attention_mask,
                prompt_poolers=clip_l_pooler,
                negative_prompt_embeds=llama_vec_n,
                negative_prompt_embeds_mask=llama_attention_mask_n,
                negative_prompt_poolers=clip_l_pooler_n,
                device=gpu,
                dtype=torch.bfloat16,
                image_embeddings=image_encoder_last_hidden_state,
                latent_indices=latent_indices,
                clean_latents=clean_latents,
                clean_latent_indices=clean_latent_indices,
                clean_latents_2x=clean_latents_2x,
                clean_latent_2x_indices=clean_latent_2x_indices,
                clean_latents_4x=clean_latents_4x,
                clean_latent_4x_indices=clean_latent_4x_indices,
                callback=callback,
            )

            total_generated_latent_frames += int(generated_latents.shape[2])
            history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)

            if not high_vram:
                offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
                load_model_as_complete(vae, target_device=gpu)

            real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]

            # Decode only the newest section and soft-blend it onto the
            # already-decoded pixels to avoid re-decoding the whole history.
            if history_pixels is None:
                history_pixels = vae_decode(real_history_latents, vae).cpu()
            else:
                section_latent_frames = latent_window_size * 2
                overlapped_frames = latent_window_size * 4 - 3

                current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
                history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)

            if not high_vram:
                unload_complete_models()

            # A progressively longer MP4 is written after every section so the
            # UI can show intermediate results.
            output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')

            save_bcthw_as_mp4(history_pixels, output_filename, fps=30, crf=mp4_crf)

            print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')

            stream.output_queue.push(('file', output_filename))
    except:
        # Bare except is deliberate: it must also catch the KeyboardInterrupt
        # raised by `callback` when the user cancels mid-sampling.
        traceback.print_exc()

        if not high_vram:
            unload_complete_models(
                text_encoder, text_encoder_2, image_encoder, vae, transformer
            )

    stream.output_queue.push(('end', None))
    return
504
-
505
def get_duration(input_image, prompt, t2v, n_prompt, randomize_seed, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf):
    """GPU-time budget, in seconds, handed to @spaces.GPU for one `process` call.

    The signature mirrors `process` because Spaces passes the same arguments
    to the duration callback.  When the debug override is set, it replaces
    the requested length and the budget is capped at 600 seconds; otherwise
    the budget is one minute of GPU time per requested second of video.
    """
    global total_second_length_debug_value

    override = total_second_length_debug_value
    if override is None:
        return total_second_length * 60
    return min(override * 60, 600)
511
-
512
-
513
@spaces.GPU(duration=get_duration)
def process(input_image, prompt,
            t2v=False,
            n_prompt="",
            randomize_seed=True,
            seed=31337,
            total_second_length=5,
            latent_window_size=9,
            steps=25,
            cfg=1.0,
            gs=10.0,
            rs=0.0,
            gpu_memory_preservation=6,
            use_teacache=True,
            mp4_crf=16
            ):
    """Gradio entry point: launch `worker` on a thread and stream its output.

    Generator yielding 6-tuples of Gradio updates:
    (result video path, preview image, description text, progress HTML,
     start-button state, end-button state).
    """
    global stream, input_image_debug_value, prompt_debug_value, total_second_length_debug_value

    if torch.cuda.device_count() == 0:
        gr.Warning('Set this space to GPU config to make it work.')
        # NOTE(review): `process` is a generator (it yields below), so this
        # return value becomes StopIteration.value and is never delivered to
        # the Gradio outputs — confirm whether a `yield` was intended here.
        return None, None, None, None, None, None

    # Debug overrides (set elsewhere in the app); consumed once, then cleared.
    if input_image_debug_value is not None or prompt_debug_value is not None or total_second_length_debug_value is not None:
        print("Debug mode")
        input_image = input_image_debug_value
        prompt = prompt_debug_value
        total_second_length = total_second_length_debug_value
        input_image_debug_value = prompt_debug_value = total_second_length_debug_value = None

    if randomize_seed:
        seed = random.randint(0, np.iinfo(np.int32).max)

    # ';' splits the prompt into per-section prompts (consumed by `worker`).
    prompts = prompt.split(";")

    # assert input_image is not None, 'No input image!'
    if t2v:
        # Text-to-video mode: substitute a blank white start frame.
        default_height, default_width = 640, 640
        input_image = np.ones((default_height, default_width, 3), dtype=np.uint8) * 255
        print("No input image provided. Using a blank white image.")

    # Initial UI state: disable start, enable end/cancel.
    yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)

    # Fresh queue pair per job; `worker` reads/writes the same global.
    stream = AsyncStream()

    async_run(worker, input_image, prompts, n_prompt, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf)

    output_filename = None

    # Relay worker events to the UI until the 'end' sentinel arrives.
    while True:
        flag, data = stream.output_queue.next()

        if flag == 'file':
            output_filename = data
            yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'progress':
            preview, desc, html = data
            yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)

        if flag == 'end':
            yield output_filename, gr.update(visible=False), gr.update(), '', gr.update(interactive=True), gr.update(interactive=False)
            break
575
-
576
- # 20250506 pftq: Modified worker to accept video input and clean frame count
577
- @spaces.GPU()
578
- @torch.no_grad()
579
- def worker_video(input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
580
-
581
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Starting ...'))))
582
-
583
- try:
584
- # Clean GPU
585
- if not high_vram:
586
- unload_complete_models(
587
- text_encoder, text_encoder_2, image_encoder, vae, transformer
588
- )
589
-
590
- # Text encoding
591
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Text encoding ...'))))
592
-
593
- if not high_vram:
594
- fake_diffusers_current_device(text_encoder, gpu) # since we only encode one text - that is one model move and one encode, offload is same time consumption since it is also one load and one encode.
595
- load_model_as_complete(text_encoder_2, target_device=gpu)
596
-
597
- llama_vec, clip_l_pooler = encode_prompt_conds(prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
598
-
599
- if cfg == 1:
600
- llama_vec_n, clip_l_pooler_n = torch.zeros_like(llama_vec), torch.zeros_like(clip_l_pooler)
601
- else:
602
- llama_vec_n, clip_l_pooler_n = encode_prompt_conds(n_prompt, text_encoder, text_encoder_2, tokenizer, tokenizer_2)
603
-
604
- llama_vec, llama_attention_mask = crop_or_pad_yield_mask(llama_vec, length=512)
605
- llama_vec_n, llama_attention_mask_n = crop_or_pad_yield_mask(llama_vec_n, length=512)
606
-
607
- # 20250506 pftq: Processing input video instead of image
608
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Video processing ...'))))
609
-
610
- # 20250506 pftq: Encode video
611
- #H, W = 640, 640 # Default resolution, will be adjusted
612
- #height, width = find_nearest_bucket(H, W, resolution=640)
613
- #start_latent, input_image_np, history_latents, fps = video_encode(input_video, vae, height, width, vae_batch_size=16, device=gpu)
614
- start_latent, input_image_np, video_latents, fps, height, width, input_video_pixels = video_encode(input_video, resolution, no_resize, vae, vae_batch_size=vae_batch, device=gpu)
615
-
616
- #Image.fromarray(input_image_np).save(os.path.join(outputs_folder, f'{job_id}.png'))
617
-
618
- # CLIP Vision
619
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'CLIP Vision encoding ...'))))
620
-
621
- if not high_vram:
622
- load_model_as_complete(image_encoder, target_device=gpu)
623
-
624
- image_encoder_output = hf_clip_vision_encode(input_image_np, feature_extractor, image_encoder)
625
- image_encoder_last_hidden_state = image_encoder_output.last_hidden_state
626
-
627
- # Dtype
628
- llama_vec = llama_vec.to(transformer.dtype)
629
- llama_vec_n = llama_vec_n.to(transformer.dtype)
630
- clip_l_pooler = clip_l_pooler.to(transformer.dtype)
631
- clip_l_pooler_n = clip_l_pooler_n.to(transformer.dtype)
632
- image_encoder_last_hidden_state = image_encoder_last_hidden_state.to(transformer.dtype)
633
-
634
- total_latent_sections = (total_second_length * fps) / (latent_window_size * 4)
635
- total_latent_sections = int(max(round(total_latent_sections), 1))
636
-
637
- for idx in range(batch):
638
- if batch > 1:
639
- print(f"Beginning video {idx+1} of {batch} with seed {seed} ")
640
-
641
- #job_id = generate_timestamp()
642
- job_id = datetime.now().strftime("%Y-%m-%d_%H-%M-%S")+f"_framepackf1-videoinput_{width}-{total_second_length}sec_seed-{seed}_steps-{steps}_distilled-{gs}_cfg-{cfg}" # 20250506 pftq: easier to read timestamp and filename
643
-
644
- # Sampling
645
- stream.output_queue.push(('progress', (None, '', make_progress_bar_html(0, 'Start sampling ...'))))
646
-
647
- rnd = torch.Generator("cpu").manual_seed(seed)
648
-
649
- # 20250506 pftq: Initialize history_latents with video latents
650
- history_latents = video_latents.cpu()
651
- total_generated_latent_frames = history_latents.shape[2]
652
- # 20250506 pftq: Initialize history_pixels to fix UnboundLocalError
653
- history_pixels = None
654
- previous_video = None
655
-
656
- # 20250507 pftq: hot fix for initial video being corrupted by vae encoding, issue with ghosting because of slight differences
657
- #history_pixels = input_video_pixels
658
- #save_bcthw_as_mp4(vae_decode(video_latents, vae).cpu(), os.path.join(outputs_folder, f'{job_id}_input_video.mp4'), fps=fps, crf=mp4_crf) # 20250507 pftq: test fast movement corrupted by vae encoding if vae batch size too low
659
-
660
- for section_index in range(total_latent_sections):
661
- if stream.input_queue.top() == 'end':
662
- stream.output_queue.push(('end', None))
663
- return
664
-
665
- print(f'section_index = {section_index}, total_latent_sections = {total_latent_sections}')
666
-
667
- if not high_vram:
668
- unload_complete_models()
669
- move_model_to_device_with_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=gpu_memory_preservation)
670
-
671
- if use_teacache:
672
- transformer.initialize_teacache(enable_teacache=True, num_steps=steps)
673
- else:
674
- transformer.initialize_teacache(enable_teacache=False)
675
-
676
- def callback(d):
677
- preview = d['denoised']
678
- preview = vae_decode_fake(preview)
679
-
680
- preview = (preview * 255.0).detach().cpu().numpy().clip(0, 255).astype(np.uint8)
681
- preview = einops.rearrange(preview, 'b c t h w -> (b h) (t w) c')
682
-
683
- if stream.input_queue.top() == 'end':
684
- stream.output_queue.push(('end', None))
685
- raise KeyboardInterrupt('User ends the task.')
686
-
687
- current_step = d['i'] + 1
688
- percentage = int(100.0 * current_step / steps)
689
- hint = f'Sampling {current_step}/{steps}'
690
- desc = f'Total frames: {int(max(0, total_generated_latent_frames * 4 - 3))}, Video length: {max(0, (total_generated_latent_frames * 4 - 3) / fps) :.2f} seconds (FPS-{fps}), Seed: {seed}, Video {idx+1} of {batch}. The video is generating part {section_index+1} of {total_latent_sections}...'
691
- stream.output_queue.push(('progress', (preview, desc, make_progress_bar_html(percentage, hint))))
692
- return
693
-
694
- # 20250506 pftq: Use user-specified number of context frames, matching original allocation for num_clean_frames=2
695
- available_frames = history_latents.shape[2] # Number of latent frames
696
- max_pixel_frames = min(latent_window_size * 4 - 3, available_frames * 4) # Cap at available pixel frames
697
- adjusted_latent_frames = max(1, (max_pixel_frames + 3) // 4) # Convert back to latent frames
698
- # Adjust num_clean_frames to match original behavior: num_clean_frames=2 means 1 frame for clean_latents_1x
699
- effective_clean_frames = max(0, num_clean_frames - 1) if num_clean_frames > 1 else 0
700
- effective_clean_frames = min(effective_clean_frames, available_frames - 2) if available_frames > 2 else 0 # 20250507 pftq: changed 1 to 2 for edge case for <=1 sec videos
701
- num_2x_frames = min(2, max(1, available_frames - effective_clean_frames - 1)) if available_frames > effective_clean_frames + 1 else 0 # 20250507 pftq: subtracted 1 for edge case for <=1 sec videos
702
- num_4x_frames = min(16, max(1, available_frames - effective_clean_frames - num_2x_frames)) if available_frames > effective_clean_frames + num_2x_frames else 0 # 20250507 pftq: Edge case for <=1 sec
703
-
704
- total_context_frames = num_4x_frames + num_2x_frames + effective_clean_frames
705
- total_context_frames = min(total_context_frames, available_frames) # 20250507 pftq: Edge case for <=1 sec videos
706
-
707
- indices = torch.arange(0, sum([1, num_4x_frames, num_2x_frames, effective_clean_frames, adjusted_latent_frames])).unsqueeze(0) # 20250507 pftq: latent_window_size to adjusted_latent_frames for edge case for <=1 sec videos
708
- clean_latent_indices_start, clean_latent_4x_indices, clean_latent_2x_indices, clean_latent_1x_indices, latent_indices = indices.split(
709
- [1, num_4x_frames, num_2x_frames, effective_clean_frames, adjusted_latent_frames], dim=1 # 20250507 pftq: latent_window_size to adjusted_latent_frames for edge case for <=1 sec videos
710
- )
711
- clean_latent_indices = torch.cat([clean_latent_indices_start, clean_latent_1x_indices], dim=1)
712
-
713
- # 20250506 pftq: Split history_latents dynamically based on available frames
714
- fallback_frame_count = 2 # 20250507 pftq: Changed 0 to 2 Edge case for <=1 sec videos
715
- context_frames = history_latents[:, :, -total_context_frames:, :, :] if total_context_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
716
- if total_context_frames > 0:
717
- split_sizes = [num_4x_frames, num_2x_frames, effective_clean_frames]
718
- split_sizes = [s for s in split_sizes if s > 0] # Remove zero sizes
719
- if split_sizes:
720
- splits = context_frames.split(split_sizes, dim=2)
721
- split_idx = 0
722
- clean_latents_4x = splits[split_idx] if num_4x_frames > 0 else history_latents[:, :, :fallback_frame_count, :, :]
723
- if clean_latents_4x.shape[2] < 2: # 20250507 pftq: edge case for <=1 sec videos
724
- clean_latents_4x = torch.cat([clean_latents_4x, clean_latents_4x[:, :, -1:, :, :]], dim=2)[:, :, :2, :, :]
725
- split_idx += 1 if num_4x_frames > 0 else 0
726
- clean_latents_2x = splits[split_idx] if num_2x_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
727
- if clean_latents_2x.shape[2] < 2: # 20250507 pftq: edge case for <=1 sec videos
728
- clean_latents_2x = torch.cat([clean_latents_2x, clean_latents_2x[:, :, -1:, :, :]], dim=2)[:, :, :2, :, :]
729
- split_idx += 1 if num_2x_frames > 0 else 0
730
- clean_latents_1x = splits[split_idx] if effective_clean_frames > 0 and split_idx < len(splits) else history_latents[:, :, :fallback_frame_count, :, :]
731
- else:
732
- clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
733
- else:
734
- clean_latents_4x = clean_latents_2x = clean_latents_1x = history_latents[:, :, :fallback_frame_count, :, :]
735
-
736
- clean_latents = torch.cat([start_latent.to(history_latents), clean_latents_1x], dim=2)
737
-
738
- # 20250507 pftq: Fix for <=1 sec videos.
739
- max_frames = min(latent_window_size * 4 - 3, history_latents.shape[2] * 4)
740
-
741
- generated_latents = sample_hunyuan(
742
- transformer=transformer,
743
- sampler='unipc',
744
- width=width,
745
- height=height,
746
- frames=max_frames,
747
- real_guidance_scale=cfg,
748
- distilled_guidance_scale=gs,
749
- guidance_rescale=rs,
750
- num_inference_steps=steps,
751
- generator=rnd,
752
- prompt_embeds=llama_vec,
753
- prompt_embeds_mask=llama_attention_mask,
754
- prompt_poolers=clip_l_pooler,
755
- negative_prompt_embeds=llama_vec_n,
756
- negative_prompt_embeds_mask=llama_attention_mask_n,
757
- negative_prompt_poolers=clip_l_pooler_n,
758
- device=gpu,
759
- dtype=torch.bfloat16,
760
- image_embeddings=image_encoder_last_hidden_state,
761
- latent_indices=latent_indices,
762
- clean_latents=clean_latents,
763
- clean_latent_indices=clean_latent_indices,
764
- clean_latents_2x=clean_latents_2x,
765
- clean_latent_2x_indices=clean_latent_2x_indices,
766
- clean_latents_4x=clean_latents_4x,
767
- clean_latent_4x_indices=clean_latent_4x_indices,
768
- callback=callback,
769
- )
770
-
771
- total_generated_latent_frames += int(generated_latents.shape[2])
772
- history_latents = torch.cat([history_latents, generated_latents.to(history_latents)], dim=2)
773
-
774
- if not high_vram:
775
- offload_model_from_device_for_memory_preservation(transformer, target_device=gpu, preserved_memory_gb=8)
776
- load_model_as_complete(vae, target_device=gpu)
777
-
778
- real_history_latents = history_latents[:, :, -total_generated_latent_frames:, :, :]
779
-
780
- if history_pixels is None:
781
- history_pixels = vae_decode(real_history_latents, vae).cpu()
782
- else:
783
- section_latent_frames = latent_window_size * 2
784
- overlapped_frames = min(latent_window_size * 4 - 3, history_pixels.shape[2])
785
-
786
- #if section_index == 0:
787
- #extra_latents = 1 # Add up to 2 extra latent frames for smoother overlap to initial video
788
- #extra_pixel_frames = extra_latents * 4 # Approx. 4 pixel frames per latent
789
- #overlapped_frames = min(overlapped_frames + extra_pixel_frames, history_pixels.shape[2], section_latent_frames * 4)
790
-
791
- current_pixels = vae_decode(real_history_latents[:, :, -section_latent_frames:], vae).cpu()
792
- history_pixels = soft_append_bcthw(history_pixels, current_pixels, overlapped_frames)
793
-
794
- if not high_vram:
795
- unload_complete_models()
796
-
797
- output_filename = os.path.join(outputs_folder, f'{job_id}_{total_generated_latent_frames}.mp4')
798
-
799
- # 20250506 pftq: Use input video FPS for output
800
- save_bcthw_as_mp4(history_pixels, output_filename, fps=fps, crf=mp4_crf)
801
- print(f"Latest video saved: {output_filename}")
802
- # 20250508 pftq: Save prompt to mp4 metadata comments
803
- set_mp4_comments_imageio_ffmpeg(output_filename, f"Prompt: {prompt} | Negative Prompt: {n_prompt}");
804
- print(f"Prompt saved to mp4 metadata comments: {output_filename}")
805
-
806
- # 20250506 pftq: Clean up previous partial files
807
- if previous_video is not None and os.path.exists(previous_video):
808
- try:
809
- os.remove(previous_video)
810
- print(f"Previous partial video deleted: {previous_video}")
811
- except Exception as e:
812
- print(f"Error deleting previous partial video {previous_video}: {e}")
813
- previous_video = output_filename
814
-
815
- print(f'Decoded. Current latent shape {real_history_latents.shape}; pixel shape {history_pixels.shape}')
816
-
817
- stream.output_queue.push(('file', output_filename))
818
-
819
- seed = (seed + 1) % np.iinfo(np.int32).max
820
-
821
- except:
822
- traceback.print_exc()
823
-
824
- if not high_vram:
825
- unload_complete_models(
826
- text_encoder, text_encoder_2, image_encoder, vae, transformer
827
- )
828
-
829
- stream.output_queue.push(('end', None))
830
- return
831
-
832
- def get_duration_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
833
- global total_second_length_debug_value
834
- if total_second_length_debug_value is not None:
835
- return min(total_second_length_debug_value * 60 * 10, 600)
836
- return total_second_length * 60 * 10
837
-
838
- # 20250506 pftq: Modified process to pass clean frame count, etc from video_encode
839
- @spaces.GPU(duration=get_duration_video)
840
- def process_video(input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch):
841
- global stream, high_vram, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
842
-
843
- if torch.cuda.device_count() == 0:
844
- gr.Warning('Set this space to GPU config to make it work.')
845
- return None, None, None, None, None, None
846
-
847
- if input_video_debug_value is not None or prompt_debug_value is not None or total_second_length_debug_value is not None:
848
- input_video = input_video_debug_value
849
- prompt = prompt_debug_value
850
- total_second_length = total_second_length_debug_value
851
- input_video_debug_value = prompt_debug_value = total_second_length_debug_value = None
852
-
853
- if randomize_seed:
854
- seed = random.randint(0, np.iinfo(np.int32).max)
855
-
856
- # 20250506 pftq: Updated assertion for video input
857
- assert input_video is not None, 'No input video!'
858
-
859
- yield None, None, '', '', gr.update(interactive=False), gr.update(interactive=True)
860
-
861
- # 20250507 pftq: Even the H100 needs offloading if the video dimensions are 720p or higher
862
- if high_vram and (no_resize or resolution>640):
863
- print("Disabling high vram mode due to no resize and/or potentially higher resolution...")
864
- high_vram = False
865
- vae.enable_slicing()
866
- vae.enable_tiling()
867
- DynamicSwapInstaller.install_model(transformer, device=gpu)
868
- DynamicSwapInstaller.install_model(text_encoder, device=gpu)
869
-
870
- # 20250508 pftq: automatically set distilled cfg to 1 if cfg is used
871
- if cfg > 1:
872
- gs = 1
873
-
874
- stream = AsyncStream()
875
-
876
- # 20250506 pftq: Pass num_clean_frames, vae_batch, etc
877
- async_run(worker_video, input_video, prompt, n_prompt, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch)
878
-
879
- output_filename = None
880
-
881
- while True:
882
- flag, data = stream.output_queue.next()
883
-
884
- if flag == 'file':
885
- output_filename = data
886
- yield output_filename, gr.update(), gr.update(), gr.update(), gr.update(interactive=False), gr.update(interactive=True)
887
-
888
- if flag == 'progress':
889
- preview, desc, html = data
890
- #yield gr.update(), gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True)
891
- yield output_filename, gr.update(visible=True, value=preview), desc, html, gr.update(interactive=False), gr.update(interactive=True) # 20250506 pftq: Keep refreshing the video in case it got hidden when the tab was in the background
892
-
893
- if flag == 'end':
894
- yield output_filename, gr.update(visible=False), desc+' Video complete.', '', gr.update(interactive=True), gr.update(interactive=False)
895
- break
896
-
897
- def end_process():
898
- stream.input_queue.push('end')
899
-
900
-
901
- css = make_progress_bar_css()
902
- block = gr.Blocks(css=css).queue()
903
- with block:
904
- if torch.cuda.device_count() == 0:
905
- with gr.Row():
906
- gr.HTML("""
907
- <p style="background-color: red;"><big><big><big><b>⚠️To use FramePack, <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR?duplicate=true">duplicate this space</a> and set a GPU with 30 GB VRAM.</b>
908
-
909
- You can't use FramePack directly here because this space runs on a CPU, which is not enough for FramePack. Please provide <a href="https://huggingface.co/spaces/Fabrice-TIERCELIN/SUPIR/discussions/new">feedback</a> if you have issues.
910
- </big></big></big></p>
911
- """)
912
- # 20250506 pftq: Updated title to reflect video input functionality
913
- gr.Markdown('# Framepack F1 with Image Input or with Video Input (Video Extension)')
914
- gr.Markdown(f"""### Video diffusion, but feels like image diffusion
915
- *FramePack F1 - a FramePack model that only predicts future frames from history frames*
916
- ### *beta* FramePack Fill 🖋️- draw a mask over the input image to inpaint the video output
917
- adapted from the official code repo [FramePack](https://github.com/lllyasviel/FramePack) by [lllyasviel](lllyasviel/FramePack_F1_I2V_HY_20250503) and [FramePack Studio](https://github.com/colinurbs/FramePack-Studio) 🙌🏻
918
- """)
919
- with gr.Row():
920
- with gr.Column():
921
- input_image = gr.Image(sources='upload', type="numpy", label="Image", height=320)
922
- input_video = gr.Video(sources='upload', label="Input Video", height=320)
923
- prompt = gr.Textbox(label="Prompt", value='')
924
- t2v = gr.Checkbox(label="Do text-to-video (ignored for video extension)", value=False)
925
-
926
- with gr.Row():
927
- start_button = gr.Button(value="Generate from image", variant="primary")
928
- start_button_video = gr.Button(value="Generate from video", variant="primary")
929
- end_button = gr.Button(value="End Generation", variant="stop", interactive=False)
930
-
931
- total_second_length = gr.Slider(label="Video Length to Generate (seconds)", minimum=1, maximum=120, value=2, step=0.1)
932
-
933
- with gr.Accordion("Advanced settings", open=False):
934
- with gr.Row():
935
- use_teacache = gr.Checkbox(label='Use TeaCache', value=True, info='Faster speed, but often makes hands and fingers slightly worse.')
936
- no_resize = gr.Checkbox(label='Force Original Video Resolution (no Resizing) (only for video extension)', value=False, info='Might run out of VRAM (720p requires > 24GB VRAM).')
937
-
938
- n_prompt = gr.Textbox(label="Negative Prompt", value="Missing arm, unrealistic position, blurred, blurry", info='Requires using normal CFG (undistilled) instead of Distilled (set Distilled=1 and CFG > 1).')
939
- randomize_seed = gr.Checkbox(label='Randomize seed', value=True, info='If checked, the seed is always different')
940
- seed = gr.Slider(label="Seed", minimum=0, maximum=np.iinfo(np.int32).max, step=1, randomize=True)
941
-
942
- latent_window_size = gr.Slider(label="Latent Window Size", minimum=1, maximum=33, value=9, step=1, info='Generate more frames at a time (larger chunks). Less degradation and better blending but higher VRAM cost. Should not change.')
943
- steps = gr.Slider(label="Steps", minimum=1, maximum=100, value=25, step=1, info='Increase for more quality, especially if using high non-distilled CFG. Changing this value is not recommended.')
944
- batch = gr.Slider(label="Batch Size (Number of Videos)", minimum=1, maximum=1000, value=1, step=1, info='Generate multiple videos each with a different seed (only for video extension).')
945
-
946
- resolution = gr.Number(label="Resolution (max width or height)", value=640, precision=0, info='Only for video extension')
947
-
948
- # 20250506 pftq: Reduced default distilled guidance scale to improve adherence to input video
949
- cfg = gr.Slider(label="CFG Scale", minimum=1.0, maximum=32.0, value=1.0, step=0.01, info='Use this instead of Distilled for more detail/control + Negative Prompt (make sure Distilled set to 1). Doubles render time. Should not change.')
950
- gs = gr.Slider(label="Distilled CFG Scale", minimum=1.0, maximum=32.0, value=10.0, step=0.01, info='Prompt adherence at the cost of less details from the input video, but to a lesser extent than Context Frames; 3=blurred motions& & unsharped, 10=focus motion; changing this value is not recommended')
951
- rs = gr.Slider(label="CFG Re-Scale", minimum=0.0, maximum=1.0, value=0.0, step=0.01) # Should not change
952
-
953
-
954
- # 20250506 pftq: Renamed slider to Number of Context Frames and updated description
955
- num_clean_frames = gr.Slider(label="Number of Context Frames", minimum=2, maximum=10, value=5, step=1, info="Retain more video details but increase memory use. Reduce to 2 if memory issues (only for video extension).")
956
-
957
- default_vae = 32
958
- if high_vram:
959
- default_vae = 128
960
- elif free_mem_gb>=20:
961
- default_vae = 64
962
-
963
- vae_batch = gr.Slider(label="VAE Batch Size for Input Video", minimum=4, maximum=256, value=default_vae, step=4, info="Reduce if running out of memory. Increase for better quality frames during fast motion (only for video extension).")
964
-
965
-
966
- gpu_memory_preservation = gr.Slider(label="GPU Inference Preserved Memory (GB) (larger means slower)", minimum=6, maximum=128, value=6, step=0.1, info="Set this number to a larger value if you encounter OOM. Larger value causes slower speed.")
967
-
968
- mp4_crf = gr.Slider(label="MP4 Compression", minimum=0, maximum=100, value=16, step=1, info="Lower means better quality. 0 is uncompressed. Change to 16 if you get black outputs. ")
969
-
970
- with gr.Accordion("Debug", open=False):
971
- input_image_debug = gr.Image(type="numpy", label="Image Debug", height=320)
972
- input_video_debug = gr.Video(sources='upload', label="Input Video Debug", height=320)
973
- prompt_debug = gr.Textbox(label="Prompt Debug", value='')
974
- total_second_length_debug = gr.Slider(label="Additional Video Length to Generate (Seconds) Debug", minimum=1, maximum=120, value=1, step=0.1)
975
-
976
- with gr.Column():
977
- preview_image = gr.Image(label="Next Latents", height=200, visible=False)
978
- result_video = gr.Video(label="Finished Frames", autoplay=True, show_share_button=False, height=512, loop=True)
979
- progress_desc = gr.Markdown('', elem_classes='no-generating-animation')
980
- progress_bar = gr.HTML('', elem_classes='no-generating-animation')
981
-
982
- # 20250506 pftq: Updated inputs to include num_clean_frames
983
- ips = [input_image, prompt, t2v, n_prompt, randomize_seed, seed, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, mp4_crf]
984
- ips_video = [input_video, prompt, n_prompt, randomize_seed, seed, batch, resolution, total_second_length, latent_window_size, steps, cfg, gs, rs, gpu_memory_preservation, use_teacache, no_resize, mp4_crf, num_clean_frames, vae_batch]
985
- start_button.click(fn=process, inputs=ips, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button, end_button])
986
- start_button_video.click(fn=process_video, inputs=ips_video, outputs=[result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button])
987
- end_button.click(fn=end_process)
988
-
989
- with gr.Row(elem_id="image_examples", visible=False):
990
- gr.Examples(
991
- examples = [
992
- [
993
- "./img_examples/Example1.png", # input_image
994
- "View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
995
- False, # t2v
996
- "Missing arm, unrealistic position, blurred, blurry", # n_prompt
997
- True, # randomize_seed
998
- 42, # seed
999
- 1, # total_second_length
1000
- 9, # latent_window_size
1001
- 25, # steps
1002
- 1.0, # cfg
1003
- 10.0, # gs
1004
- 0.0, # rs
1005
- 6, # gpu_memory_preservation
1006
- True, # use_teacache
1007
- 16 # mp4_crf
1008
- ],
1009
- [
1010
- "./img_examples/Example1.png", # input_image
1011
- "A dolphin emerges from the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
1012
- False, # t2v
1013
- "Missing arm, unrealistic position, blurred, blurry", # n_prompt
1014
- True, # randomize_seed
1015
- 42, # seed
1016
- 1, # total_second_length
1017
- 9, # latent_window_size
1018
- 25, # steps
1019
- 1.0, # cfg
1020
- 10.0, # gs
1021
- 0.0, # rs
1022
- 6, # gpu_memory_preservation
1023
- True, # use_teacache
1024
- 16 # mp4_crf
1025
- ],
1026
- [
1027
- "./img_examples/Example1.png", # input_image
1028
- "We are sinking, photorealistic, realistic, intricate details, 8k, insanely detailed",
1029
- False, # t2v
1030
- "Missing arm, unrealistic position, blurred, blurry", # n_prompt
1031
- True, # randomize_seed
1032
- 42, # seed
1033
- 1, # total_second_length
1034
- 9, # latent_window_size
1035
- 25, # steps
1036
- 1.0, # cfg
1037
- 10.0, # gs
1038
- 0.0, # rs
1039
- 6, # gpu_memory_preservation
1040
- True, # use_teacache
1041
- 16 # mp4_crf
1042
- ],
1043
- [
1044
- "./img_examples/Example1.png", # input_image
1045
- "A boat is passing, photorealistic, realistic, intricate details, 8k, insanely detailed",
1046
- False, # t2v
1047
- "Missing arm, unrealistic position, blurred, blurry", # n_prompt
1048
- True, # randomize_seed
1049
- 42, # seed
1050
- 1, # total_second_length
1051
- 9, # latent_window_size
1052
- 25, # steps
1053
- 1.0, # cfg
1054
- 10.0, # gs
1055
- 0.0, # rs
1056
- 6, # gpu_memory_preservation
1057
- True, # use_teacache
1058
- 16 # mp4_crf
1059
- ],
1060
- ],
1061
- run_on_click = True,
1062
- fn = process,
1063
- inputs = ips,
1064
- outputs = [result_video, preview_image, progress_desc, progress_bar, start_button, end_button],
1065
- cache_examples = True,
1066
- )
1067
-
1068
- with gr.Row(elem_id="video_examples", visible=False):
1069
- gr.Examples(
1070
- examples = [
1071
- [
1072
- "./img_examples/Example1.mp4", # input_video
1073
- "View of the sea as far as the eye can see, from the seaside, a piece of land is barely visible on the horizon at the middle, the sky is radiant, reflections of the sun in the water, photorealistic, realistic, intricate details, 8k, insanely detailed",
1074
- "Missing arm, unrealistic position, blurred, blurry", # n_prompt
1075
- True, # randomize_seed
1076
- 42, # seed
1077
- 1, # batch
1078
- 640, # resolution
1079
- 1, # total_second_length
1080
- 9, # latent_window_size
1081
- 25, # steps
1082
- 1.0, # cfg
1083
- 10.0, # gs
1084
- 0.0, # rs
1085
- 6, # gpu_memory_preservation
1086
- True, # use_teacache
1087
- False, # no_resize
1088
- 16, # mp4_crf
1089
- 5, # num_clean_frames
1090
- default_vae
1091
- ],
1092
- ],
1093
- run_on_click = True,
1094
- fn = process_video,
1095
- inputs = ips_video,
1096
- outputs = [result_video, preview_image, progress_desc, progress_bar, start_button_video, end_button],
1097
- cache_examples = True,
1098
- )
1099
-
1100
- gr.Markdown('## Guide')
1101
- gr.Markdown("I discourage to use the Text-to-Video feature. You should rather generate an image with Flux and use Image-to-Video. You will save time.")
1102
-
1103
-
1104
- def handle_field_debug_change(input_image_debug_data, input_video_debug_data, prompt_debug_data, total_second_length_debug_data):
1105
- global input_image_debug_value, input_video_debug_value, prompt_debug_value, total_second_length_debug_value
1106
- input_image_debug_value = input_image_debug_data
1107
- input_video_debug_value = input_video_debug_data
1108
- prompt_debug_value = prompt_debug_data
1109
- total_second_length_debug_value = total_second_length_debug_data
1110
- return []
1111
-
1112
- input_image_debug.upload(
1113
- fn=handle_field_debug_change,
1114
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1115
- outputs=[]
1116
- )
1117
-
1118
- input_video_debug.upload(
1119
- fn=handle_field_debug_change,
1120
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1121
- outputs=[]
1122
- )
1123
-
1124
- prompt_debug.change(
1125
- fn=handle_field_debug_change,
1126
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1127
- outputs=[]
1128
- )
1129
-
1130
- total_second_length_debug.change(
1131
- fn=handle_field_debug_change,
1132
- inputs=[input_image_debug, input_video_debug, prompt_debug, total_second_length_debug],
1133
- outputs=[]
1134
- )
1135
-
1136
- block.launch(mcp_server=False, ssr_mode=False)