import gradio as gr
import moviepy.editor as mp
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video
import spaces

# Load diffusion pipelines (fp16 weights keep GPU memory usage manageable)
image_pipeline = DiffusionPipeline.from_pretrained("ByteDance/SDXL-Lightning", torch_dtype=torch.float16)
video_pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt-1-1", torch_dtype=torch.float16)
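# Note: ByteDance/SDXL-Lightning publishes UNet checkpoints rather than a full
# pipeline repo, so in practice this from_pretrained call may need the SDXL
# base pipeline with the Lightning UNet loaded into it, and the video model is
# gated behind a license on the Hub. Moving both pipelines to the GPU, e.g.
# image_pipeline.to("cuda"), is assumed before generation.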
def generate_images(prompt, num_images):
    """Generates images with the image pipeline, one call per image."""
    images = []
    for _ in range(int(num_images)):
        # Diffusers pipelines are called directly; there is no .pipe() method
        generated_image = image_pipeline(prompt=prompt).images[0]
        images.append(generated_image)
    return images
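# Note: most text-to-image pipelines also accept num_images_per_prompt, so the
# loop above could be replaced by a single batched call if memory allows.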
def generate_videos(images):
    """Generates a list of video frames from each image with the video pipeline."""
    videos = []
    for image in images:
        # Stable Video Diffusion expects 1024x576 input and returns batches of
        # frames via .frames, not .images
        frames = video_pipeline(image.resize((1024, 576)), decode_chunk_size=8).frames[0]
        videos.append(frames)
    return videos
def combine_videos(video_clips, fps=7):
    """Writes each frame list to disk and concatenates the clips with moviepy."""
    paths = [export_to_video(frames, f"clip_{i}.mp4", fps=fps) for i, frames in enumerate(video_clips)]
    final_clip = mp.concatenate_videoclips([mp.VideoFileClip(p) for p in paths])
    final_clip.write_videofile("combined.mp4")
    return "combined.mp4"
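# Note: fps=7 roughly matches the rate Stable Video Diffusion is sampled at;
# the intermediate clip_*.mp4 files land in the working directory and could be
# swapped for temporary files in a real deployment.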
@spaces.GPU  # request a GPU for the duration of the call on ZeroGPU Spaces
def generate(prompt, num_images):
    """Generates images from the prompt, animates each, and combines the clips."""
    images = generate_images(prompt, num_images)
    video_clips = generate_videos(images)
    combined_video = combine_videos(video_clips)
    return combined_video
# Gradio interface: a prompt box, an image-count slider, and a video output
interface = gr.Interface(
    fn=generate,
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(1, 4, value=1, step=1, label="Number of images"),
    ],
    outputs=gr.Video(elem_classes=["output-video"]),
    title="AI Video Generation",
    description="Enter a prompt to generate a video using diffusion models.",
    css="""
    .output-video {
        width: 100%;   /* adjust width as needed */
        height: 400px; /* adjust height as desired */
    }
    """,
)
# Launch the interface
interface.launch()
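# To run locally (dependency names are an assumption; pin versions as needed):
#   pip install gradio "moviepy<2" diffusers transformers accelerate torch spaces
#   python app.py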