"""Minimal Gradio demo: text-to-video generation with a diffusers pipeline."""

import gradio as gr
import torch
from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
from diffusers.utils import export_to_video  # kept for optional video export below

device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the text-to-video pipeline once at startup and move it to the best device.
pipe = DiffusionPipeline.from_pretrained("camenduru/text2-video-zero").to(device)
pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)

# NOTE(review): on CUDA, pipe.enable_model_cpu_offload() / enable_vae_slicing()
# can reduce GPU memory pressure — enable if the default fits poorly.


def ttv(prompt):
    """Generate video frames for *prompt* and return them for display.

    Args:
        prompt: text description of the scene to generate.

    Returns:
        The list of generated frames (suitable for a gr.Gallery).
    """
    # BUG FIX: the original called `model(...)`, but only `pipe` is defined
    # (the gr.Interface.load line was commented out) — that raised NameError.
    # Fall back to a default prompt when the textbox is empty.
    if not prompt:
        prompt = "Spiderman is surfing. Darth Vader is also surfing and following Spiderman"
    result = pipe(prompt, num_inference_steps=25, num_frames=20)
    # Diffusers pipelines return a result object; the frames are on `.frames`
    # (presumably a list of PIL images/arrays — confirm against pipeline docs).
    video_frames = result.frames
    # To export an actual video file instead of frames:
    #   video_path = export_to_video(video_frames)
    return video_frames


with gr.Blocks() as app:
    inp = gr.Textbox(label="Prompt")
    btn = gr.Button("Generate")
    outp = gr.Gallery()
    # BUG FIX: the textbox was previously disconnected (inputs=None) while
    # ttv() hard-coded its prompt — wire the input through.
    btn.click(ttv, inp, outp)

app.launch()