| import os |
| import torch |
| import gradio as gr |
| import numpy as np |
| from PIL import Image |
| import imageio |
|
|
| from diffusers import StableVideoDiffusionPipeline |
|
|
| |
| |
| |
# --------------------------------------------------------------------------
# Configuration
# --------------------------------------------------------------------------
MODEL_ID = "stabilityai/stable-video-diffusion-img2vid"  # SVD image-to-video checkpoint
OUTPUT_DIR = "output"  # rendered MP4s are written here

NUM_FRAMES = 14  # frames generated per clip
FPS = 7          # playback rate of the saved video
SEED = 42        # fixed seed so runs are reproducible

# Make sure the destination folder exists before any video is written.
os.makedirs(OUTPUT_DIR, exist_ok=True)

# --------------------------------------------------------------------------
# Runtime placement: prefer the GPU when present; fp16 halves memory on
# CUDA, while CPU inference stays in fp32.
# --------------------------------------------------------------------------
_HAS_CUDA = torch.cuda.is_available()
DEVICE = "cuda" if _HAS_CUDA else "cpu"
DTYPE = torch.float16 if _HAS_CUDA else torch.float32
|
|
| |
| |
| |
# Load the SVD image-to-video pipeline once at module import.
pipe = StableVideoDiffusionPipeline.from_pretrained(
    MODEL_ID,
    torch_dtype=DTYPE
)

# Trade a little speed for a lower peak memory footprint in attention.
pipe.enable_attention_slicing()

if DEVICE == "cuda":
    # BUG FIX: the original called pipe.to("cuda") *and* enabled model CPU
    # offload. Offload manages device placement itself (it streams each
    # submodule to the GPU on demand), so moving the whole pipeline to CUDA
    # first defeats the memory savings and errors on recent diffusers.
    pipe.enable_model_cpu_offload()
else:
    pipe.to(DEVICE)

# Seeded RNG so the same input image yields the same video across runs.
generator = torch.Generator(device=DEVICE).manual_seed(SEED)
|
|
| |
| |
| |
def save_video(frames, path, fps):
    """Encode an iterable of HxWx3 uint8 frames into a video file at *path*.

    The container/codec is inferred by imageio from the file extension;
    *fps* sets the playback rate.
    """
    with imageio.get_writer(path, fps=fps) as writer:
        for frame in frames:
            writer.append_data(frame)
|
|
| |
| |
| |
def _to_uint8_frame(frame):
    """Normalize one pipeline frame to an HxWx3 uint8 ndarray.

    Recent diffusers return PIL Images from the SVD pipeline (default
    output_type="pil"); older versions / output_type="np" return float
    arrays in [0, 1]. The original code did ``(frame * 255).astype`` which
    breaks on PIL Images and corrupts already-uint8 data.
    """
    arr = np.asarray(frame)
    if arr.dtype != np.uint8:
        arr = (np.clip(arr, 0.0, 1.0) * 255).round().astype(np.uint8)
    return arr


def images_to_videos(files):
    """Generate one MP4 per uploaded image with the SVD pipeline.

    Parameters
    ----------
    files : list | None
        Uploaded files from Gradio — either objects exposing ``.name``
        or plain path strings, depending on the Gradio version.

    Returns
    -------
    list[str] | None
        Paths of the rendered videos, or ``None`` when nothing was uploaded.
    """
    if not files:
        return None

    output_videos = []

    for idx, file in enumerate(files):
        # Gradio may hand back tempfile wrappers (.name) or bare paths.
        src_path = getattr(file, "name", file)
        image = Image.open(src_path).convert("RGB")
        # NOTE(review): SVD img2vid was trained at 1024x576; 768x432 keeps
        # the 16:9 aspect but is below native resolution — confirm quality.
        image = image.resize((768, 432))

        # inference_mode avoids autograd bookkeeping; autocast only helps
        # (and is only safe here) on CUDA — on CPU we run plain fp32.
        with torch.inference_mode():
            if DEVICE == "cuda":
                with torch.autocast("cuda"):
                    result = pipe(
                        image=image,
                        num_frames=NUM_FRAMES,
                        generator=generator
                    )
            else:
                result = pipe(
                    image=image,
                    num_frames=NUM_FRAMES,
                    generator=generator
                )

        frames = [_to_uint8_frame(frame) for frame in result.frames[0]]

        out_path = os.path.join(OUTPUT_DIR, f"video_{idx}.mp4")
        save_video(frames, out_path, FPS)
        output_videos.append(out_path)

    return output_videos
|
|
| |
| |
| |
with gr.Blocks(title="Image to Video (HF Spaces Safe)") as demo:
    # BUG FIX: the original header/button strings were cp1253 mojibake
    # ("π₯" for the camera emoji, "β" for the arrow, "π" on the button) —
    # restored to readable UTF-8.
    gr.Markdown(
        """
        ## 🎥 Image → Video AI (Hugging Face Spaces)
        - Upload **multiple images**
        - Output **MP4**
        - CPU & GPU compatible
        """
    )

    # Multiple uploads are accepted; each image is rendered to its own MP4.
    image_input = gr.File(
        file_types=["image"],
        file_count="multiple",
        label="Upload Images"
    )

    # A single player is shown, so only the first generated clip is displayed.
    video_output = gr.Video(label="Generated Video")

    generate_btn = gr.Button("🚀 Generate")

    def process(files):
        """Run generation and surface the first rendered clip (or None)."""
        videos = images_to_videos(files)
        return videos[0] if videos else None

    generate_btn.click(
        fn=process,
        inputs=image_input,
        outputs=video_output
    )

demo.launch()