import torch
import gradio as gr
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video
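# Load the ModelScope text-to-video pipeline in fp16; CPU offload and VAE
# slicing keep peak VRAM low enough for a single consumer GPU.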
pipe = DiffusionPipeline.from_pretrained("damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16")
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
pipe.enable_model_cpu_offload()
pipe.enable_vae_slicing()
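# Generation callback for Gradio: takes a prompt and a duration in seconds,
# returns the path of the rendered video file.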
def model(prompt, duration_seconds):
    # Roughly 10 generated frames per requested second of video.
    num_frames = int(duration_seconds) * 10
    # Note: on newer diffusers releases pipe(...).frames is batched and
    # needs an extra [0]; this matches the list-of-frames API used here.
    video_frames = pipe(
        prompt,
        negative_prompt="low quality",
        num_inference_steps=25,
        num_frames=num_frames,
    ).frames
    video_path = export_to_video(video_frames)
    return video_path
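# Minimal Gradio UI: a prompt textbox and a 1-10 second duration slider in, a video out.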
demo = gr.Interface(
    fn=model,
    inputs=["text", gr.Slider(1, 10, step=1, label="Duration (seconds)")],
    # width/height set the displayed size of the video player.
    outputs=gr.Video(label="Generated video", width=400, height=300),
)
demo.launch(inline=False)  # don't embed inline in a notebook; open via the printed local URL