import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import export_to_video

# Load the model pipeline from Hugging Face
pipe = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid",
    torch_dtype=torch.float16,
    variant="fp16",
).to("cuda")
# Generate a short video from a single input image
def generate_video(input_image):
    # Required preprocessing: convert to RGB and resize to the model's native
    # 1024x576 resolution (PIL's resize takes (width, height))
    input_image = input_image.convert("RGB").resize((1024, 576))
    result = pipe(input_image, decode_chunk_size=8)
    # export_to_video writes the frames to a temporary .mp4 file and returns its path
    video_path = export_to_video(result.frames[0])
    return video_path
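# Example (assumption, commented out so the Space still launches the Gradio UI):
# the function can be smoke-tested with the sample image used in the diffusers
# SVD docs before wiring it into the interface.
# from diffusers.utils import load_image
# test_image = load_image(
#     "https://huggingface.co/datasets/hf-internal-testing/diffusers-images/resolve/main/svd/rocket.png"
# )
# print(generate_video(test_image))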
# Gradio interface
gr.Interface(
    fn=generate_video,
    inputs=gr.Image(type="pil", label="Upload an image"),
    outputs=gr.Video(label="Generated video"),
    title="Stable Video Diffusion - Image to Video",
    description="This model turns a single image into a short video animation (about 14 frames, 1024x576 px).",
).launch()
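# Optional (assumption, not in the original app): for reproducible results, a
# seeded generator can be passed to the pipeline call inside generate_video:
# result = pipe(input_image, decode_chunk_size=8,
#               generator=torch.manual_seed(42))
# Long-running generations also benefit from Gradio's request queue, e.g.
# gr.Interface(...).queue().launch() instead of the plain .launch() above.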