import torch
import gradio as gr
from diffusers import StableVideoDiffusionPipeline
from diffusers.utils import load_image, export_to_video
import spaces

# Use the GPU if one is available, otherwise fall back to the CPU
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the image-to-video pipeline in half precision
pipeline = StableVideoDiffusionPipeline.from_pretrained(
    "stabilityai/stable-video-diffusion-img2vid-xt",
    torch_dtype=torch.float16,
    variant="fp16",
)
pipeline.to(device)


@spaces.GPU(duration=120)
def generate_video(image_path, seed):
    # Load the image and resize it to the resolution the model expects
    image = load_image(image_path)
    image = image.resize((1024, 576))

    # Seed the generator for reproducible results; gr.Number yields a float,
    # but torch.Generator.manual_seed() requires an int
    generator = torch.Generator(device=device).manual_seed(int(seed))

    # Generate the video frames
    frames = pipeline(image, decode_chunk_size=8, generator=generator).frames[0]

    # Export the frames to a video file
    output_video_path = "generated.mp4"
    export_to_video(frames, output_video_path, fps=7)

    return output_video_path


# Create the Gradio interface
iface = gr.Interface(
    fn=generate_video,
    inputs=[
        gr.Image(type="filepath", label="Upload Image"),
        gr.Number(label="Seed", value=42),
    ],
    outputs=gr.Video(label="Generated Video"),
    title="Stable Video Diffusion",
    description="Generate a video from an uploaded image using Stable Video Diffusion.",
    examples=[
        ["image.png", 42],
    ],
)

# Launch the interface
iface.launch()
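
# A minimal smoke test is sketched below, commented out so the Space still
# launches normally. It assumes a local file named "image.png" exists next to
# this script, which nothing above guarantees; to run it, uncomment the lines
# and remove the iface.launch() call:
#
# if __name__ == "__main__":
#     video_path = generate_video("image.png", seed=42)
#     print(f"Wrote {video_path}")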