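"""Gradio Space: generate short videos from text prompts with the
damo-vilab/text-to-video-ms-1.7b diffusion pipeline."""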
import gradio as gr
import spaces
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
import cv2
import numpy as np
# Load the ModelScope text-to-video model in half precision to reduce VRAM use
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# Offload submodules to CPU when idle so the pipeline fits on smaller GPUs
pipe.enable_model_cpu_offload()

# If this runs on a ZeroGPU Space (the otherwise-unused `spaces` import
# suggests it), a GPU is only attached inside functions decorated with
# @spaces.GPU; without the decorator the pipeline falls back to CPU and
# the Space fails at runtime.
@spaces.GPU
def generate(prompt, num_inference_steps=25):
    # Pass the step count by keyword: the pipeline's second positional
    # argument is `height`, not `num_inference_steps`, so the original call
    # requested a 25-pixel-tall video and raised a ValueError (height must be
    # divisible by 8). On recent diffusers releases the output is batched, so
    # `.frames[0]` may be needed instead of `.frames`.
    video_frames = pipe(prompt, num_inference_steps=num_inference_steps).frames
    resized_frames = []
    for frame in video_frames:
        # Round each dimension down to the nearest multiple of 8 so the
        # exported mp4 is codec-friendly
        height, width, _ = frame.shape
        new_height = (height // 8) * 8
        new_width = (width // 8) * 8
        # cv2.resize expects (width, height) order
        resized_frame = cv2.resize(frame, (new_width, new_height))
        resized_frames.append(resized_frame)
    # export_to_video writes the frames to a temporary .mp4 and returns its path
    video_path = export_to_video(np.array(resized_frames))
    return video_path
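
# Build the Gradio UI: a prompt box and a step-count slider feed generate()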
# The hint text belongs in `placeholder`; passed positionally it became the
# textbox's initial value
prompt = gr.Textbox(label="Prompt", placeholder="Enter prompt to generate a video")
num_inference_steps = gr.Slider(10, 50, value=25, step=1, label="Inference steps")
interface = gr.Interface(
    generate,
    inputs=[prompt, num_inference_steps],
    outputs="video",
    examples=[["Astronaut riding a horse", 25], ["Darth Vader surfing in waves", 20]],
    cache_examples=False,
    theme="soft",
)
interface.launch()
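# launch() starts the web server: Spaces serves app.py automatically, while a
# local run is reachable at http://127.0.0.1:7860 by default.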