Fabrice-TIERCELIN committed on
Commit f4eb846 · verified · 1 parent: 4bcd590

Upload 3 files

Schoolboy_with_backpack.webp ADDED
Schoolboy_without_backpack.webp ADDED
app.py CHANGED
@@ -1,491 +1,491 @@
1
- import os
2
- # PyTorch 2.8 (temporary hack)
3
- os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
4
-
5
- # --- 1. Model Download and Setup (Diffusers Backend) ---
6
- try:
7
- import spaces
8
- except:
9
- class spaces():
10
- def GPU(*args, **kwargs):
11
- def decorator(function):
12
- return lambda *dummy_args, **dummy_kwargs: function(*dummy_args, **dummy_kwargs)
13
- return decorator
14
-
15
- import torch
16
- from diffusers import FlowMatchEulerDiscreteScheduler
17
- from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
18
- from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
19
- from diffusers.utils.export_utils import export_to_video
20
- import gradio as gr
21
- import tempfile
22
- import time
23
- from datetime import datetime
24
- import numpy as np
25
- from PIL import Image
26
- import random
27
- import math
28
- import gc
29
- from gradio_client import Client, handle_file # Import for API call
30
-
31
- # Import the optimization function from the separate file
32
- from optimization import optimize_pipeline_
33
-
34
- # --- Constants and Model Loading ---
35
- MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
36
-
37
- # --- NEW: Flexible Dimension Constants ---
38
- MAX_DIMENSION = 832
39
- MIN_DIMENSION = 480
40
- DIMENSION_MULTIPLE = 16
41
- SQUARE_SIZE = 480
42
-
43
- MAX_SEED = np.iinfo(np.int32).max
44
-
45
- FIXED_FPS = 24
46
- MIN_FRAMES_MODEL = 8
47
- MAX_FRAMES_MODEL = 81
48
-
49
- MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS, 1)
50
- MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS, 1)
51
-
52
- input_image_debug_value = [None]
53
- end_image_debug_value = [None]
54
- prompt_debug_value = [None]
55
- total_second_length_debug_value = [None]
56
-
57
- default_negative_prompt = "Vibrant colors, overexposure, static, blurred details, subtitles, error, style, artwork, painting, image, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, many people in the background, walking backwards, overexposure, jumpcut, crossfader, "
58
-
59
- print("Loading transformer...")
60
-
61
- transformer = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
62
- subfolder='transformer',
63
- torch_dtype=torch.bfloat16,
64
- device_map='cuda',
65
- )
66
-
67
- print("Loadingtransformer 2...")
68
-
69
- transformer_2 = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
70
- subfolder='transformer_2',
71
- torch_dtype=torch.bfloat16,
72
- device_map='cuda',
73
- )
74
-
75
- print("Loading models into memory. This may take a few minutes...")
76
-
77
- pipe = WanImageToVideoPipeline.from_pretrained(
78
- MODEL_ID,
79
- transformer = transformer,
80
- transformer_2 = transformer_2,
81
- torch_dtype=torch.bfloat16,
82
- )
83
- print("Loading scheduler...")
84
- pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)
85
- pipe.to('cuda')
86
-
87
- print("Clean cache...")
88
- for i in range(3):
89
- gc.collect()
90
- torch.cuda.synchronize()
91
- torch.cuda.empty_cache()
92
-
93
- print("Optimizing pipeline...")
94
-
95
- optimize_pipeline_(pipe,
96
- image=Image.new('RGB', (MAX_DIMENSION, MIN_DIMENSION)),
97
- prompt='prompt',
98
- height=MIN_DIMENSION,
99
- width=MAX_DIMENSION,
100
- num_frames=MAX_FRAMES_MODEL,
101
- )
102
- print("All models loaded and optimized. Gradio app is ready.")
103
-
104
-
105
- # --- 2. Image Processing and Application Logic ---
106
- def generate_end_frame(start_img, gen_prompt, progress=gr.Progress(track_tqdm=True)):
107
- """Calls an external Gradio API to generate an image."""
108
- if start_img is None:
109
- raise gr.Error("Please provide a Start Frame first.")
110
-
111
- hf_token = os.getenv("HF_TOKEN")
112
- if not hf_token:
113
- raise gr.Error("HF_TOKEN not found in environment variables. Please set it in your Space secrets.")
114
-
115
- with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
116
- start_img.save(tmpfile.name)
117
- tmp_path = tmpfile.name
118
-
119
- progress(0.1, desc="Connecting to image generation API...")
120
- client = Client("multimodalart/nano-banana-private")
121
-
122
- progress(0.5, desc=f"Generating with prompt: '{gen_prompt}'...")
123
- try:
124
- result = client.predict(
125
- prompt=gen_prompt,
126
- images=[
127
- {"image": handle_file(tmp_path)}
128
- ],
129
- manual_token=hf_token,
130
- api_name="/unified_image_generator"
131
- )
132
- finally:
133
- os.remove(tmp_path)
134
-
135
- progress(1.0, desc="Done!")
136
- print(result)
137
- return result
138
-
139
- def switch_to_upload_tab():
140
- """Returns a gr.Tabs update to switch to the first tab."""
141
- return gr.Tabs(selected="upload_tab")
142
-
143
-
144
- def process_image_for_video(image: Image.Image) -> Image.Image:
145
- """
146
- Resizes an image based on the following rules for video generation:
147
- 1. The longest side will be scaled down to MAX_DIMENSION if it's larger.
148
- 2. The shortest side will be scaled up to MIN_DIMENSION if it's smaller.
149
- 3. The final dimensions will be rounded to the nearest multiple of DIMENSION_MULTIPLE.
150
- 4. Square images are resized to a fixed SQUARE_SIZE.
151
- The aspect ratio is preserved as closely as possible.
152
- """
153
- width, height = image.size
154
-
155
- # Rule 4: Handle square images
156
- if width == height:
157
- return image.resize((SQUARE_SIZE, SQUARE_SIZE), Image.Resampling.LANCZOS)
158
-
159
- # Determine target dimensions while preserving aspect ratio
160
- aspect_ratio = width / height
161
- new_width, new_height = width, height
162
-
163
- # Rule 1: Scale down if too large
164
- if new_width > MAX_DIMENSION or new_height > MAX_DIMENSION:
165
- if aspect_ratio > 1: # Landscape
166
- scale = MAX_DIMENSION / new_width
167
- else: # Portrait
168
- scale = MAX_DIMENSION / new_height
169
- new_width *= scale
170
- new_height *= scale
171
-
172
- # Rule 2: Scale up if too small
173
- if new_width < MIN_DIMENSION or new_height < MIN_DIMENSION:
174
- if aspect_ratio > 1: # Landscape
175
- scale = MIN_DIMENSION / new_height
176
- else: # Portrait
177
- scale = MIN_DIMENSION / new_width
178
- new_width *= scale
179
- new_height *= scale
180
-
181
- # Rule 3: Round to the nearest multiple of DIMENSION_MULTIPLE
182
- final_width = int(round(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
183
- final_height = int(round(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
184
-
185
- # Ensure final dimensions are at least the minimum
186
- final_width = max(final_width, MIN_DIMENSION if aspect_ratio < 1 else SQUARE_SIZE)
187
- final_height = max(final_height, MIN_DIMENSION if aspect_ratio > 1 else SQUARE_SIZE)
188
-
189
-
190
- return image.resize((final_width, final_height), Image.Resampling.LANCZOS)
191
-
192
- def resize_and_crop_to_match(target_image, reference_image):
193
- """Resizes and center-crops the target image to match the reference image's dimensions."""
194
- ref_width, ref_height = reference_image.size
195
- target_width, target_height = target_image.size
196
- scale = max(ref_width / target_width, ref_height / target_height)
197
- new_width, new_height = int(target_width * scale), int(target_height * scale)
198
- resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
199
- left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
200
- return resized.crop((left, top, left + ref_width, top + ref_height))
201
-
202
- def generate_video(
203
- start_image_pil,
204
- end_image_pil,
205
- prompt,
206
- negative_prompt=default_negative_prompt,
207
- duration_seconds=2.1,
208
- steps=8,
209
- guidance_scale=1,
210
- guidance_scale_2=1,
211
- seed=42,
212
- randomize_seed=True,
213
- progress=gr.Progress(track_tqdm=True)
214
- ):
215
- start = time.time()
216
- allocation_time = 120
217
- factor = 1
218
-
219
- if input_image_debug_value[0] is not None or end_image_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None:
220
- start_image_pil = input_image_debug_value[0]
221
- end_image_pil = end_image_debug_value[0]
222
- prompt = prompt_debug_value[0]
223
- duration_seconds = total_second_length_debug_value[0]
224
- allocation_time = min(duration_seconds * 60 * 100, 10 * 60)
225
- factor = 3.1
226
-
227
- if start_image_pil is None or end_image_pil is None:
228
- raise gr.Error("Please upload both a start and an end image.")
229
-
230
- # Step 1: Process the start image to get our target dimensions based on the new rules.
231
- processed_start_image = process_image_for_video(start_image_pil)
232
-
233
- # Step 2: Make the end image match the *exact* dimensions of the processed start image.
234
- processed_end_image = resize_and_crop_to_match(end_image_pil, processed_start_image)
235
-
236
- target_height, target_width = processed_start_image.height, processed_start_image.width
237
-
238
- # Handle seed and frame count
239
- current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
240
- num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
241
-
242
- progress(0.2, desc=f"Generating {num_frames} frames at {target_width}x{target_height} (seed: {current_seed})...")
243
-
244
- progress(0.1, desc="Preprocessing images...")
245
- output_video, download_button, seed_input = generate_video_on_gpu(
246
- start_image_pil,
247
- end_image_pil,
248
- prompt,
249
- negative_prompt,
250
- duration_seconds,
251
- steps,
252
- guidance_scale,
253
- guidance_scale_2,
254
- seed,
255
- randomize_seed,
256
- progress,
257
- allocation_time,
258
- factor,
259
- target_height,
260
- target_width,
261
- current_seed,
262
- num_frames,
263
- processed_start_image,
264
- processed_end_image
265
- )
266
- progress(1.0, desc="Done!")
267
- end = time.time()
268
- secondes = int(end - start)
269
- minutes = math.floor(secondes / 60)
270
- secondes = secondes - (minutes * 60)
271
- hours = math.floor(minutes / 60)
272
- minutes = minutes - (hours * 60)
273
- information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
274
- "The video been generated in " + \
275
- ((str(hours) + " h, ") if hours != 0 else "") + \
276
- ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
277
- str(secondes) + " sec. " + \
278
- "The video resolution is " + str(target_width) + \
279
- " pixels large and " + str(target_height) + \
280
- " pixels high, so a resolution of " + f'{target_width * target_height:,}' + " pixels."
281
- return [output_video, download_button, seed_input, gr.update(value = information, visible = True)]
282
-
283
- def get_duration(
284
- start_image_pil,
285
- end_image_pil,
286
- prompt,
287
- negative_prompt,
288
- duration_seconds,
289
- steps,
290
- guidance_scale,
291
- guidance_scale_2,
292
- seed,
293
- randomize_seed,
294
- progress,
295
- allocation_time,
296
- factor,
297
- target_height,
298
- target_width,
299
- current_seed,
300
- num_frames,
301
- processed_start_image,
302
- processed_end_image
303
- ):
304
- return allocation_time
305
-
306
- @spaces.GPU(duration=get_duration)
307
- def generate_video_on_gpu(
308
- start_image_pil,
309
- end_image_pil,
310
- prompt,
311
- negative_prompt,
312
- duration_seconds,
313
- steps,
314
- guidance_scale,
315
- guidance_scale_2,
316
- seed,
317
- randomize_seed,
318
- progress,
319
- allocation_time,
320
- factor,
321
- target_height,
322
- target_width,
323
- current_seed,
324
- num_frames,
325
- processed_start_image,
326
- processed_end_image
327
- ):
328
- """
329
- Generates a video by interpolating between a start and end image, guided by a text prompt,
330
- using the diffusers Wan2.2 pipeline.
331
- """
332
- print("Generate a video with the prompt: " + prompt)
333
-
334
- output_frames_list = pipe(
335
- image=processed_start_image,
336
- last_image=processed_end_image,
337
- prompt=prompt,
338
- negative_prompt=negative_prompt,
339
- height=target_height,
340
- width=target_width,
341
- num_frames=int(num_frames * factor),
342
- guidance_scale=float(guidance_scale),
343
- guidance_scale_2=float(guidance_scale_2),
344
- num_inference_steps=int(steps),
345
- generator=torch.Generator(device="cuda").manual_seed(current_seed),
346
- ).frames[0]
347
-
348
- progress(0.9, desc="Encoding and saving video...")
349
-
350
- video_path = 'wan_' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.mp4'
351
- print("Exporting video: " + video_path)
352
-
353
- export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
354
- print("Video exported: " + video_path)
355
-
356
- return video_path, gr.update(value = video_path, visible = True), current_seed
357
-
358
-
359
- # --- 3. Gradio User Interface ---
360
-
361
- with gr.Blocks() as app:
362
- gr.Markdown("# Wan 2.2 First/Last Frame Video Fast")
363
- gr.Markdown("Based on the [Wan 2.2 First/Last Frame workflow](https://www.reddit.com/r/StableDiffusion/comments/1me4306/psa_wan_22_does_first_frame_last_frame_out_of_the/), applied to 🧨 Diffusers + [lightx2v/Wan2.2-Lightning](https://huggingface.co/lightx2v/Wan2.2-Lightning) 8-step LoRA")
364
-
365
- with gr.Row(elem_id="general_items"):
366
- with gr.Column():
367
- with gr.Group(elem_id="group_all"):
368
- with gr.Row():
369
- start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
370
- # Capture the Tabs component in a variable and assign IDs to tabs
371
- with gr.Tabs(elem_id="group_tabs") as tabs:
372
- with gr.TabItem("Upload", id="upload_tab"):
373
- end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
374
- with gr.TabItem("Generate", id="generate_tab"):
375
- generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
376
- gr.Markdown("Generate a custom end-frame with an edit model like [Nano Banana](https://huggingface.co/spaces/multimodalart/nano-banana) or [Qwen Image Edit](https://huggingface.co/spaces/multimodalart/Qwen-Image-Edit-Fast)", elem_id="or_item")
377
- prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images")
378
-
379
- with gr.Accordion("Advanced Settings", open=False):
380
- duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
381
- negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
382
- steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
383
- guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
384
- guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - low noise")
385
- with gr.Row():
386
- seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
387
- randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
388
-
389
- generate_button = gr.Button("Generate Video", variant="primary")
390
-
391
- with gr.Column():
392
- output_video = gr.Video(label="Generated Video", autoplay = True, loop = True)
393
- download_button = gr.DownloadButton(label="Download", visible = True)
394
- video_information = gr.HTML(value = "", visible = True)
395
-
396
- # Main video generation button
397
- ui_inputs = [
398
- start_image,
399
- end_image,
400
- prompt,
401
- negative_prompt_input,
402
- duration_seconds_input,
403
- steps_slider,
404
- guidance_scale_input,
405
- guidance_scale_2_input,
406
- seed_input,
407
- randomize_seed_checkbox
408
- ]
409
- ui_outputs = [output_video, download_button, seed_input, video_information]
410
-
411
- generate_button.click(
412
- fn=generate_video,
413
- inputs=ui_inputs,
414
- outputs=ui_outputs
415
- )
416
-
417
- generate_5seconds.click(
418
- fn=switch_to_upload_tab,
419
- inputs=None,
420
- outputs=[tabs]
421
- ).then(
422
- fn=lambda img: generate_end_frame(img, "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"),
423
- inputs=[start_image],
424
- outputs=[end_image]
425
- ).success(
426
- fn=generate_video,
427
- inputs=ui_inputs,
428
- outputs=ui_outputs
429
- )
430
-
431
- with gr.Row(visible=False):
432
- input_image_debug=gr.Image(type="pil", label="Image Debug")
433
- end_image_debug=gr.Image(type="pil", label="End Image Debug")
434
- prompt_debug=gr.Textbox(label="Prompt Debug")
435
- total_second_length_debug=gr.Slider(label="Additional Video Length to Generate (seconds) Debug", minimum=1, maximum=120, value=10, step=0.1)
436
- gr.Examples(
437
- examples=[["ugly_sonic.jpeg", "squatting_sonic.png", "the character dodges the missiles"]],
438
- inputs=[start_image, end_image, prompt],
439
- outputs=ui_outputs,
440
- fn=generate_video,
441
- run_on_click=True,
442
- cache_examples=True,
443
- cache_mode='lazy',
444
- )
445
-
446
- gr.Examples(
447
- label = "Examples from demo",
448
- examples = [
449
- ["poli_tower.png", "tower_takes_off.png", "the man turns around"],
450
- ["ugly_sonic.jpeg", "squatting_sonic.png", "the character dodges the missiles"],
451
- ["capyabara_zoomed.png", "capyabara.webp", "a dramatic dolly zoom"],
452
- ],
453
- inputs = [start_image, end_image, prompt],
454
- outputs = ui_outputs,
455
- fn = generate_video,
456
- cache_examples = False,
457
- )
458
-
459
- def handle_field_debug_change(input_image_debug_data, end_image_debug_data, prompt_debug_data, total_second_length_debug_data):
460
- input_image_debug_value[0] = input_image_debug_data
461
- end_image_debug_value[0] = end_image_debug_data
462
- prompt_debug_value[0] = prompt_debug_data
463
- total_second_length_debug_value[0] = total_second_length_debug_data
464
- return []
465
-
466
- input_image_debug.upload(
467
- fn=handle_field_debug_change,
468
- inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
469
- outputs=[]
470
- )
471
-
472
- end_image_debug.upload(
473
- fn=handle_field_debug_change,
474
- inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
475
- outputs=[]
476
- )
477
-
478
- prompt_debug.change(
479
- fn=handle_field_debug_change,
480
- inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
481
- outputs=[]
482
- )
483
-
484
- total_second_length_debug.change(
485
- fn=handle_field_debug_change,
486
- inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
487
- outputs=[]
488
- )
489
-
490
- if __name__ == "__main__":
491
  app.launch(mcp_server=True, share=True)
 
1
+ import os
2
+ # PyTorch 2.8 (temporary hack)
3
+ os.system('pip install --upgrade --pre --extra-index-url https://download.pytorch.org/whl/nightly/cu126 "torch<2.9" spaces')
4
+
5
+ # --- 1. Model Download and Setup (Diffusers Backend) ---
6
+ try:
7
+ import spaces
8
+ except:
9
+ class spaces():
10
+ def GPU(*args, **kwargs):
11
+ def decorator(function):
12
+ return lambda *dummy_args, **dummy_kwargs: function(*dummy_args, **dummy_kwargs)
13
+ return decorator
14
+
15
+ import torch
16
+ from diffusers import FlowMatchEulerDiscreteScheduler
17
+ from diffusers.pipelines.wan.pipeline_wan_i2v import WanImageToVideoPipeline
18
+ from diffusers.models.transformers.transformer_wan import WanTransformer3DModel
19
+ from diffusers.utils.export_utils import export_to_video
20
+ import gradio as gr
21
+ import tempfile
22
+ import time
23
+ from datetime import datetime
24
+ import numpy as np
25
+ from PIL import Image
26
+ import random
27
+ import math
28
+ import gc
29
+ from gradio_client import Client, handle_file # Import for API call
30
+
31
+ # Import the optimization function from the separate file
32
+ from optimization import optimize_pipeline_
33
+
34
+ # --- Constants and Model Loading ---
35
+ MODEL_ID = "Wan-AI/Wan2.2-I2V-A14B-Diffusers"
36
+
37
+ # --- NEW: Flexible Dimension Constants ---
38
+ MAX_DIMENSION = 832
39
+ MIN_DIMENSION = 480
40
+ DIMENSION_MULTIPLE = 16
41
+ SQUARE_SIZE = 480
42
+
43
+ MAX_SEED = np.iinfo(np.int32).max
44
+
45
+ FIXED_FPS = 24
46
+ MIN_FRAMES_MODEL = 8
47
+ MAX_FRAMES_MODEL = 81
48
+
49
+ MIN_DURATION = round(MIN_FRAMES_MODEL/FIXED_FPS, 1)
50
+ MAX_DURATION = round(MAX_FRAMES_MODEL/FIXED_FPS, 1)
51
+
52
+ input_image_debug_value = [None]
53
+ end_image_debug_value = [None]
54
+ prompt_debug_value = [None]
55
+ total_second_length_debug_value = [None]
56
+
57
+ default_negative_prompt = "Vibrant colors, overexposure, static, blurred details, subtitles, error, style, artwork, painting, image, still, overall gray, worst quality, low quality, JPEG compression residue, ugly, mutilated, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, malformed limbs, fused fingers, still image, cluttered background, three legs, many people in the background, walking backwards, overexposure, jumpcut, crossfader, "
58
+
59
+ print("Loading transformer...")
60
+
61
+ transformer = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
62
+ subfolder='transformer',
63
+ torch_dtype=torch.bfloat16,
64
+ device_map='cuda',
65
+ )
66
+
67
+ print("Loadingtransformer 2...")
68
+
69
+ transformer_2 = WanTransformer3DModel.from_pretrained('cbensimon/Wan2.2-I2V-A14B-bf16-Diffusers',
70
+ subfolder='transformer_2',
71
+ torch_dtype=torch.bfloat16,
72
+ device_map='cuda',
73
+ )
74
+
75
+ print("Loading models into memory. This may take a few minutes...")
76
+
77
+ pipe = WanImageToVideoPipeline.from_pretrained(
78
+ MODEL_ID,
79
+ transformer = transformer,
80
+ transformer_2 = transformer_2,
81
+ torch_dtype=torch.bfloat16,
82
+ )
83
+ print("Loading scheduler...")
84
+ pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(pipe.scheduler.config, shift=8.0)
85
+ pipe.to('cuda')
86
+
87
+ print("Clean cache...")
88
+ for i in range(3):
89
+ gc.collect()
90
+ torch.cuda.synchronize()
91
+ torch.cuda.empty_cache()
92
+
93
+ print("Optimizing pipeline...")
94
+
95
+ optimize_pipeline_(pipe,
96
+ image=Image.new('RGB', (MAX_DIMENSION, MIN_DIMENSION)),
97
+ prompt='prompt',
98
+ height=MIN_DIMENSION,
99
+ width=MAX_DIMENSION,
100
+ num_frames=MAX_FRAMES_MODEL,
101
+ )
102
+ print("All models loaded and optimized. Gradio app is ready.")
103
+
104
+
105
+ # --- 2. Image Processing and Application Logic ---
106
+ def generate_end_frame(start_img, gen_prompt, progress=gr.Progress(track_tqdm=True)):
107
+ """Calls an external Gradio API to generate an image."""
108
+ if start_img is None:
109
+ raise gr.Error("Please provide a Start Frame first.")
110
+
111
+ hf_token = os.getenv("HF_TOKEN")
112
+ if not hf_token:
113
+ raise gr.Error("HF_TOKEN not found in environment variables. Please set it in your Space secrets.")
114
+
115
+ with tempfile.NamedTemporaryFile(suffix=".png", delete=False) as tmpfile:
116
+ start_img.save(tmpfile.name)
117
+ tmp_path = tmpfile.name
118
+
119
+ progress(0.1, desc="Connecting to image generation API...")
120
+ client = Client("multimodalart/nano-banana-private")
121
+
122
+ progress(0.5, desc=f"Generating with prompt: '{gen_prompt}'...")
123
+ try:
124
+ result = client.predict(
125
+ prompt=gen_prompt,
126
+ images=[
127
+ {"image": handle_file(tmp_path)}
128
+ ],
129
+ manual_token=hf_token,
130
+ api_name="/unified_image_generator"
131
+ )
132
+ finally:
133
+ os.remove(tmp_path)
134
+
135
+ progress(1.0, desc="Done!")
136
+ print(result)
137
+ return result
138
+
139
+ def switch_to_upload_tab():
140
+ """Returns a gr.Tabs update to switch to the first tab."""
141
+ return gr.Tabs(selected="upload_tab")
142
+
143
+
144
+ def process_image_for_video(image: Image.Image) -> Image.Image:
145
+ """
146
+ Resizes an image based on the following rules for video generation:
147
+ 1. The longest side will be scaled down to MAX_DIMENSION if it's larger.
148
+ 2. The shortest side will be scaled up to MIN_DIMENSION if it's smaller.
149
+ 3. The final dimensions will be rounded to the nearest multiple of DIMENSION_MULTIPLE.
150
+ 4. Square images are resized to a fixed SQUARE_SIZE.
151
+ The aspect ratio is preserved as closely as possible.
152
+ """
153
+ width, height = image.size
154
+
155
+ # Rule 4: Handle square images
156
+ if width == height:
157
+ return image.resize((SQUARE_SIZE, SQUARE_SIZE), Image.Resampling.LANCZOS)
158
+
159
+ # Determine target dimensions while preserving aspect ratio
160
+ aspect_ratio = width / height
161
+ new_width, new_height = width, height
162
+
163
+ # Rule 1: Scale down if too large
164
+ if new_width > MAX_DIMENSION or new_height > MAX_DIMENSION:
165
+ if aspect_ratio > 1: # Landscape
166
+ scale = MAX_DIMENSION / new_width
167
+ else: # Portrait
168
+ scale = MAX_DIMENSION / new_height
169
+ new_width *= scale
170
+ new_height *= scale
171
+
172
+ # Rule 2: Scale up if too small
173
+ if new_width < MIN_DIMENSION or new_height < MIN_DIMENSION:
174
+ if aspect_ratio > 1: # Landscape
175
+ scale = MIN_DIMENSION / new_height
176
+ else: # Portrait
177
+ scale = MIN_DIMENSION / new_width
178
+ new_width *= scale
179
+ new_height *= scale
180
+
181
+ # Rule 3: Round to the nearest multiple of DIMENSION_MULTIPLE
182
+ final_width = int(round(new_width / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
183
+ final_height = int(round(new_height / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
184
+
185
+ # Ensure final dimensions are at least the minimum
186
+ final_width = max(final_width, MIN_DIMENSION if aspect_ratio < 1 else SQUARE_SIZE)
187
+ final_height = max(final_height, MIN_DIMENSION if aspect_ratio > 1 else SQUARE_SIZE)
188
+
189
+
190
+ return image.resize((final_width, final_height), Image.Resampling.LANCZOS)
191
+
192
+ def resize_and_crop_to_match(target_image, reference_image):
193
+ """Resizes and center-crops the target image to match the reference image's dimensions."""
194
+ ref_width, ref_height = reference_image.size
195
+ target_width, target_height = target_image.size
196
+ scale = max(ref_width / target_width, ref_height / target_height)
197
+ new_width, new_height = int(target_width * scale), int(target_height * scale)
198
+ resized = target_image.resize((new_width, new_height), Image.Resampling.LANCZOS)
199
+ left, top = (new_width - ref_width) // 2, (new_height - ref_height) // 2
200
+ return resized.crop((left, top, left + ref_width, top + ref_height))
201
+
202
+ def generate_video(
203
+ start_image_pil,
204
+ end_image_pil,
205
+ prompt,
206
+ negative_prompt=default_negative_prompt,
207
+ duration_seconds=2.1,
208
+ steps=8,
209
+ guidance_scale=1,
210
+ guidance_scale_2=1,
211
+ seed=42,
212
+ randomize_seed=True,
213
+ progress=gr.Progress(track_tqdm=True)
214
+ ):
215
+ start = time.time()
216
+ allocation_time = 120
217
+ factor = 1
218
+
219
+ if input_image_debug_value[0] is not None or end_image_debug_value[0] is not None or prompt_debug_value[0] is not None or total_second_length_debug_value[0] is not None:
220
+ start_image_pil = input_image_debug_value[0]
221
+ end_image_pil = end_image_debug_value[0]
222
+ prompt = prompt_debug_value[0]
223
+ duration_seconds = total_second_length_debug_value[0]
224
+ allocation_time = min(duration_seconds * 60 * 100, 10 * 60)
225
+ factor = 3.1
226
+
227
+ if start_image_pil is None or end_image_pil is None:
228
+ raise gr.Error("Please upload both a start and an end image.")
229
+
230
+ # Step 1: Process the start image to get our target dimensions based on the new rules.
231
+ processed_start_image = process_image_for_video(start_image_pil)
232
+
233
+ # Step 2: Make the end image match the *exact* dimensions of the processed start image.
234
+ processed_end_image = resize_and_crop_to_match(end_image_pil, processed_start_image)
235
+
236
+ target_height, target_width = processed_start_image.height, processed_start_image.width
237
+
238
+ # Handle seed and frame count
239
+ current_seed = random.randint(0, MAX_SEED) if randomize_seed else int(seed)
240
+ num_frames = np.clip(int(round(duration_seconds * FIXED_FPS)), MIN_FRAMES_MODEL, MAX_FRAMES_MODEL)
241
+
242
+ progress(0.2, desc=f"Generating {num_frames} frames at {target_width}x{target_height} (seed: {current_seed})...")
243
+
244
+ progress(0.1, desc="Preprocessing images...")
245
+ output_video, download_button, seed_input = generate_video_on_gpu(
246
+ start_image_pil,
247
+ end_image_pil,
248
+ prompt,
249
+ negative_prompt,
250
+ duration_seconds,
251
+ steps,
252
+ guidance_scale,
253
+ guidance_scale_2,
254
+ seed,
255
+ randomize_seed,
256
+ progress,
257
+ allocation_time,
258
+ factor,
259
+ target_height,
260
+ target_width,
261
+ current_seed,
262
+ num_frames,
263
+ processed_start_image,
264
+ processed_end_image
265
+ )
266
+ progress(1.0, desc="Done!")
267
+ end = time.time()
268
+ secondes = int(end - start)
269
+ minutes = math.floor(secondes / 60)
270
+ secondes = secondes - (minutes * 60)
271
+ hours = math.floor(minutes / 60)
272
+ minutes = minutes - (hours * 60)
273
+ information = ("Start the process again if you want a different result. " if randomize_seed else "") + \
274
+ "The video been generated in " + \
275
+ ((str(hours) + " h, ") if hours != 0 else "") + \
276
+ ((str(minutes) + " min, ") if hours != 0 or minutes != 0 else "") + \
277
+ str(secondes) + " sec. " + \
278
+ "The video resolution is " + str(target_width) + \
279
+ " pixels large and " + str(target_height) + \
280
+ " pixels high, so a resolution of " + f'{target_width * target_height:,}' + " pixels."
281
+ return [output_video, download_button, seed_input, gr.update(value = information, visible = True)]
282
+
283
+ def get_duration(
284
+ start_image_pil,
285
+ end_image_pil,
286
+ prompt,
287
+ negative_prompt,
288
+ duration_seconds,
289
+ steps,
290
+ guidance_scale,
291
+ guidance_scale_2,
292
+ seed,
293
+ randomize_seed,
294
+ progress,
295
+ allocation_time,
296
+ factor,
297
+ target_height,
298
+ target_width,
299
+ current_seed,
300
+ num_frames,
301
+ processed_start_image,
302
+ processed_end_image
303
+ ):
304
+ return allocation_time
305
+
306
+ @spaces.GPU(duration=get_duration)
307
+ def generate_video_on_gpu(
308
+ start_image_pil,
309
+ end_image_pil,
310
+ prompt,
311
+ negative_prompt,
312
+ duration_seconds,
313
+ steps,
314
+ guidance_scale,
315
+ guidance_scale_2,
316
+ seed,
317
+ randomize_seed,
318
+ progress,
319
+ allocation_time,
320
+ factor,
321
+ target_height,
322
+ target_width,
323
+ current_seed,
324
+ num_frames,
325
+ processed_start_image,
326
+ processed_end_image
327
+ ):
328
+ """
329
+ Generates a video by interpolating between a start and end image, guided by a text prompt,
330
+ using the diffusers Wan2.2 pipeline.
331
+ """
332
+ print("Generate a video with the prompt: " + prompt)
333
+
334
+ output_frames_list = pipe(
335
+ image=processed_start_image,
336
+ last_image=processed_end_image,
337
+ prompt=prompt,
338
+ negative_prompt=negative_prompt,
339
+ height=target_height,
340
+ width=target_width,
341
+ num_frames=int(num_frames * factor),
342
+ guidance_scale=float(guidance_scale),
343
+ guidance_scale_2=float(guidance_scale_2),
344
+ num_inference_steps=int(steps),
345
+ generator=torch.Generator(device="cuda").manual_seed(current_seed),
346
+ ).frames[0]
347
+
348
+ progress(0.9, desc="Encoding and saving video...")
349
+
350
+ video_path = 'wan_' + datetime.now().strftime("%Y-%m-%d_%H-%M-%S.%f") + '.mp4'
351
+ print("Exporting video: " + video_path)
352
+
353
+ export_to_video(output_frames_list, video_path, fps=FIXED_FPS)
354
+ print("Video exported: " + video_path)
355
+
356
+ return video_path, gr.update(value = video_path, visible = True), current_seed
357
+
358
+
359
+ # --- 3. Gradio User Interface ---
360
+
361
+ with gr.Blocks() as app:
362
+ gr.Markdown("# Wan 2.2 First/Last Frame Video Fast")
363
+ gr.Markdown("Based on the [Wan 2.2 First/Last Frame workflow](https://www.reddit.com/r/StableDiffusion/comments/1me4306/psa_wan_22_does_first_frame_last_frame_out_of_the/), applied to 🧨 Diffusers + [lightx2v/Wan2.2-Lightning](https://huggingface.co/lightx2v/Wan2.2-Lightning) 8-step LoRA")
364
+
365
+ with gr.Row(elem_id="general_items"):
366
+ with gr.Column():
367
+ with gr.Group(elem_id="group_all"):
368
+ with gr.Row():
369
+ start_image = gr.Image(type="pil", label="Start Frame", sources=["upload", "clipboard"])
370
+ # Capture the Tabs component in a variable and assign IDs to tabs
371
+ with gr.Tabs(elem_id="group_tabs") as tabs:
372
+ with gr.TabItem("Upload", id="upload_tab"):
373
+ end_image = gr.Image(type="pil", label="End Frame", sources=["upload", "clipboard"])
374
+ with gr.TabItem("Generate", id="generate_tab"):
375
+ generate_5seconds = gr.Button("Generate scene 5 seconds in the future", elem_id="fivesec")
376
+ gr.Markdown("Generate a custom end-frame with an edit model like [Nano Banana](https://huggingface.co/spaces/multimodalart/nano-banana) or [Qwen Image Edit](https://huggingface.co/spaces/multimodalart/Qwen-Image-Edit-Fast)", elem_id="or_item")
377
+ prompt = gr.Textbox(label="Prompt", info="Describe the transition between the two images")
378
+
379
+ with gr.Accordion("Advanced Settings", open=False):
380
+ duration_seconds_input = gr.Slider(minimum=MIN_DURATION, maximum=MAX_DURATION, step=0.1, value=2.1, label="Video Duration (seconds)", info=f"Clamped to model's {MIN_FRAMES_MODEL}-{MAX_FRAMES_MODEL} frames at {FIXED_FPS}fps.")
381
+ negative_prompt_input = gr.Textbox(label="Negative Prompt", value=default_negative_prompt, lines=3)
382
+ steps_slider = gr.Slider(minimum=1, maximum=30, step=1, value=8, label="Inference Steps")
383
+ guidance_scale_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - high noise")
384
+ guidance_scale_2_input = gr.Slider(minimum=0.0, maximum=10.0, step=0.5, value=1.0, label="Guidance Scale - low noise")
385
+ with gr.Row():
386
+ seed_input = gr.Slider(label="Seed", minimum=0, maximum=MAX_SEED, step=1, value=42)
387
+ randomize_seed_checkbox = gr.Checkbox(label="Randomize seed", value=True)
388
+
389
+ generate_button = gr.Button("Generate Video", variant="primary")
390
+
391
+ with gr.Column():
392
+ output_video = gr.Video(label="Generated Video", autoplay = True, loop = True)
393
+ download_button = gr.DownloadButton(label="Download", visible = True)
394
+ video_information = gr.HTML(value = "", visible = True)
395
+
396
+ # Main video generation button
397
+ ui_inputs = [
398
+ start_image,
399
+ end_image,
400
+ prompt,
401
+ negative_prompt_input,
402
+ duration_seconds_input,
403
+ steps_slider,
404
+ guidance_scale_input,
405
+ guidance_scale_2_input,
406
+ seed_input,
407
+ randomize_seed_checkbox
408
+ ]
409
+ ui_outputs = [output_video, download_button, seed_input, video_information]
410
+
411
+ generate_button.click(
412
+ fn=generate_video,
413
+ inputs=ui_inputs,
414
+ outputs=ui_outputs
415
+ )
416
+
417
+ generate_5seconds.click(
418
+ fn=switch_to_upload_tab,
419
+ inputs=None,
420
+ outputs=[tabs]
421
+ ).then(
422
+ fn=lambda img: generate_end_frame(img, "this image is a still frame from a movie. generate a new frame with what happens on this scene 5 seconds in the future"),
423
+ inputs=[start_image],
424
+ outputs=[end_image]
425
+ ).success(
426
+ fn=generate_video,
427
+ inputs=ui_inputs,
428
+ outputs=ui_outputs
429
+ )
430
+
431
+ with gr.Row(visible=False):
432
+ input_image_debug=gr.Image(type="pil", label="Image Debug")
433
+ end_image_debug=gr.Image(type="pil", label="End Image Debug")
434
+ prompt_debug=gr.Textbox(label="Prompt Debug")
435
+ total_second_length_debug=gr.Slider(label="Additional Video Length to Generate (seconds) Debug", minimum=1, maximum=120, value=10, step=0.1)
436
+ gr.Examples(
437
+ examples=[["ugly_sonic.jpeg", "squatting_sonic.png", "the character dodges the missiles"]],
438
+ inputs=[start_image, end_image, prompt],
439
+ outputs=ui_outputs,
440
+ fn=generate_video,
441
+ run_on_click=True,
442
+ cache_examples=True,
443
+ cache_mode='lazy',
444
+ )
445
+
446
+ gr.Examples(
447
+ label = "Examples from demo",
448
+ examples = [
449
+ ["poli_tower.png", "tower_takes_off.png", "The man turns around."],
450
+ ["ugly_sonic.jpeg", "squatting_sonic.png", "पात्रं क्षेपणास्त्रं चकमाति।"],
451
+ ["Schoolboy_without_backpack.webp", "Schoolboy_with_backpack.webp", "The schoolchild puts on their schoolbag."],
452
+ ],
453
+ inputs = [start_image, end_image, prompt],
454
+ outputs = ui_outputs,
455
+ fn = generate_video,
456
+ cache_examples = False,
457
+ )
458
+
459
+ def handle_field_debug_change(input_image_debug_data, end_image_debug_data, prompt_debug_data, total_second_length_debug_data):
460
+ input_image_debug_value[0] = input_image_debug_data
461
+ end_image_debug_value[0] = end_image_debug_data
462
+ prompt_debug_value[0] = prompt_debug_data
463
+ total_second_length_debug_value[0] = total_second_length_debug_data
464
+ return []
465
+
466
+ input_image_debug.upload(
467
+ fn=handle_field_debug_change,
468
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
469
+ outputs=[]
470
+ )
471
+
472
+ end_image_debug.upload(
473
+ fn=handle_field_debug_change,
474
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
475
+ outputs=[]
476
+ )
477
+
478
+ prompt_debug.change(
479
+ fn=handle_field_debug_change,
480
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
481
+ outputs=[]
482
+ )
483
+
484
+ total_second_length_debug.change(
485
+ fn=handle_field_debug_change,
486
+ inputs=[input_image_debug, end_image_debug, prompt_debug, total_second_length_debug],
487
+ outputs=[]
488
+ )
489
+
490
+ if __name__ == "__main__":
491
  app.launch(mcp_server=True, share=True)
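
For reference, here is a minimal, self-contained sketch (not part of the commit) that mirrors the resizing rules of `process_image_for_video` and the duration-to-frame clamp used in app.py above. The constants are copied from the diff; the helper names `target_dimensions` and `frame_count` are illustrative only.

```python
# Sketch of the dimension and frame-count math from app.py above.
# Constants copied from the diff; helpers only mirror the arithmetic,
# they do not touch the Wan pipeline itself.
import numpy as np

MAX_DIMENSION = 832        # longest side is scaled down to this if larger
MIN_DIMENSION = 480        # shortest side is scaled up to this if smaller
DIMENSION_MULTIPLE = 16    # final sides are rounded to a multiple of this
SQUARE_SIZE = 480          # square inputs are forced to this size
FIXED_FPS = 24
MIN_FRAMES_MODEL = 8
MAX_FRAMES_MODEL = 81

def target_dimensions(width: int, height: int) -> tuple[int, int]:
    """Return the (width, height) that process_image_for_video would produce."""
    if width == height:                                  # rule 4: square inputs
        return SQUARE_SIZE, SQUARE_SIZE
    aspect_ratio = width / height
    new_w, new_h = float(width), float(height)
    if new_w > MAX_DIMENSION or new_h > MAX_DIMENSION:   # rule 1: scale down
        scale = MAX_DIMENSION / (new_w if aspect_ratio > 1 else new_h)
        new_w, new_h = new_w * scale, new_h * scale
    if new_w < MIN_DIMENSION or new_h < MIN_DIMENSION:   # rule 2: scale up
        scale = MIN_DIMENSION / (new_h if aspect_ratio > 1 else new_w)
        new_w, new_h = new_w * scale, new_h * scale
    final_w = int(round(new_w / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)  # rule 3
    final_h = int(round(new_h / DIMENSION_MULTIPLE) * DIMENSION_MULTIPLE)
    final_w = max(final_w, MIN_DIMENSION if aspect_ratio < 1 else SQUARE_SIZE)
    final_h = max(final_h, MIN_DIMENSION if aspect_ratio > 1 else SQUARE_SIZE)
    return final_w, final_h

def frame_count(duration_seconds: float) -> int:
    """Clamp the requested duration to the model's 8-81 frame window at 24 fps."""
    return int(np.clip(int(round(duration_seconds * FIXED_FPS)),
                       MIN_FRAMES_MODEL, MAX_FRAMES_MODEL))

if __name__ == "__main__":
    print(target_dimensions(1920, 1080))   # (848, 480)
    print(frame_count(2.1))                # 50 frames for the 2.1 s default
```

Note that, as in the original code, an image scaled up to satisfy MIN_DIMENSION can end up slightly above MAX_DIMENSION after rounding (for example, a 1920x1080 frame becomes 848x480).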