# NOTE: removed non-Python page-scrape artifacts (file-size banner, commit
# hashes, line-number ribbon) that made this file unparseable.
import gradio as gr
import moviepy.editor as mp
from diffusers import DiffusionPipeline
import spaces
# Load diffusion pipelines once at module import time. Both calls download
# model weights on first run; no explicit device placement happens here —
# presumably the `spaces.GPU` decorators handle GPU allocation (TODO confirm).
image_pipeline = DiffusionPipeline.from_pretrained("stabilityai/sdxl-turbo")
video_pipeline = DiffusionPipeline.from_pretrained("stabilityai/stable-video-diffusion-img2vid-xt-1-1")
@spaces.GPU(duration=120)
def generate_images(prompt, num_images):
    """Generate ``num_images`` images from a text prompt.

    Args:
        prompt: Text prompt forwarded to the image diffusion pipeline.
        num_images: How many images to generate; coerced to ``int`` so
            float/string values coming from UI widgets also work.

    Returns:
        list: The generated images, one per iteration.
    """
    # Diffusers pipelines are callable objects; the original code invoked a
    # non-existent `.pipe` attribute, which raises AttributeError at runtime.
    return [image_pipeline(prompt=prompt).images[0] for _ in range(int(num_images))]
@spaces.GPU(duration=120)
def generate_videos(images):
    """Generate one video (frame sequence) per input image.

    Args:
        images: Iterable of conditioning images for the img2vid pipeline.

    Returns:
        list: One generated video (frame sequence) per input image.
    """
    videos = []
    for image in images:
        # Call the pipeline directly (there is no `.pipe` attribute), and
        # read `.frames` — image-to-video pipelines expose their output
        # there, not under `.images`.
        result = video_pipeline(image)
        videos.append(result.frames[0])
    return videos
def combine_videos(video_clips):
    """Concatenate a sequence of moviepy clips into a single clip.

    NOTE(review): upstream generation appears to produce raw frame
    sequences rather than moviepy VideoClip objects — confirm the inputs
    are converted to clips before reaching this function.
    """
    return mp.concatenate_videoclips(video_clips)
def generate(prompt, num_images=1):
    """Run the full pipeline: prompt -> images -> videos -> one video file.

    Args:
        prompt: Text prompt for the image pipeline.
        num_images: Number of images (and video segments) to generate.
            Defaults to 1 so the function still works when the UI wires up
            only a single text input (backward-compatible addition).

    Returns:
        str: Path to the rendered MP4 file — Gradio's video output expects
        a file path, not an in-memory moviepy clip.
    """
    import tempfile

    images = generate_images(prompt, num_images)
    video_clips = generate_videos(images)
    combined_video = combine_videos(video_clips)
    # Render to a temporary file because returning the clip object itself
    # is not a format the video output component can display.
    out_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
    combined_video.write_videofile(out_path)
    return out_path
# Gradio interface: prompt + clip count in, rendered video out.
interface = gr.Interface(
    fn=generate,
    # The handler takes (prompt, num_images); the bare "text" shortcut only
    # supplied one argument, so expose both inputs explicitly.
    inputs=[
        gr.Textbox(label="Prompt"),
        gr.Slider(minimum=1, maximum=4, step=1, value=1, label="Number of clips"),
    ],
    outputs="video",
    # NOTE(review): dropped the deprecated `interpretation=` argument —
    # current Gradio releases reject it as an unexpected keyword.
    title="AI Video Generation",
    description="Enter a prompt to generate a video using diffusion models.",
    css="""
    .output-video {
        width: 100%; /* Adjust width as needed */
        height: 400px; /* Adjust height as desired */
    }
    """,
)

# Launch the interface
interface.launch()
# NOTE: removed trailing page-scrape artifact ("|").