import gradio as gr
import torch
from diffusers import DiffusionPipeline
from diffusers.utils import export_to_video

# Load the model
pipe = DiffusionPipeline.from_pretrained("cerspense/zeroscope_v2_576w", torch_dtype=torch.float16)
pipe = pipe.to("cuda")
# Define the video generation function
def text_to_video(prompt):
    # Run the pipeline; with recent diffusers versions .frames is a per-prompt list, so take the first entry
    video_frames = pipe(prompt, num_inference_steps=50).frames[0]
    # Convert the frames into a video file and return its path so gr.Video can display it
    video_path = export_to_video(video_frames)
    return video_path
# Gradio UI setup
with gr.Blocks() as demo:
    gr.Markdown("# Text to Video Generator")
    with gr.Row():
        with gr.Column():
            prompt = gr.Textbox(label="Enter your prompt")
            generate_btn = gr.Button("Generate Video")
        with gr.Column():
            video_output = gr.Video(label="Generated Video")

    generate_btn.click(fn=text_to_video, inputs=prompt, outputs=video_output)
# Launch the Gradio app
demo.launch()