# Hugging Face Spaces status-banner residue from the page scrape,
# commented out so this file is valid Python:
# Spaces:
# Runtime error
# Runtime error
import os  # kept: may be used elsewhere in the project
import subprocess
import sys


def _pip(*args):
    """Run `python -m pip <args>` for the *current* interpreter.

    Uses an argument list with shell=False (no shell-injection surface,
    unlike the previous os.system string calls) and sys.executable so the
    packages are installed into the interpreter actually running this app.
    Raises CalledProcessError if pip exits non-zero, instead of silently
    ignoring the return code.
    """
    subprocess.check_call([sys.executable, "-m", "pip", *args])


# Runtime bootstrap of the app's dependencies (Spaces-style self-install).
_pip("install", "torch")
_pip("install", "diffusers")
_pip("install", "--upgrade", "pip")
_pip("install", "imageio")
_pip("install", "numpy")
_pip("install", "transformers")
# ---------------------------------------------------------------------------
# Dead code, kept for reference only.
# This was an earlier Gradio UI around the damo-vilab/text-to-video-ms-1.7b
# pipeline, left in the file as a no-op module-level string literal (and
# polluted with paste artifacts). Converted to comments so it cannot be
# mistaken for live code; the active implementation follows below.
# ---------------------------------------------------------------------------
# import torch
# from diffusers import DiffusionPipeline, DPMSolverMultistepScheduler
# from diffusers.utils import export_to_video
# import gradio as gr
#
# pipe = DiffusionPipeline.from_pretrained(
#     "damo-vilab/text-to-video-ms-1.7b", torch_dtype=torch.float16, variant="fp16"
# )
# pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
# pipe.enable_model_cpu_offload()
#
# def text_video(prompt):
#     video_frames = pipe(prompt, num_inference_steps=25).frames
#     video_path = export_to_video(video_frames)
#
# result = gr.Video(label="Generated Video")
# gr.Interface(
#     fn=text_video,
#     # label (Korean): "What video shall we generate? : "
#     inputs=gr.Textbox(label="어떤 비디오를 생성할까요? : "),
#     outputs=result,
# ).launch()
import torch
import imageio
from diffusers import TextToVideoZeroPipeline
import numpy as np


def generate_video(
    prompt="A panda is playing guitar on times square",
    model_id="runwayml/stable-diffusion-v1-5",
    video_length=8,
    chunk_size=4,
    seed=0,
    fps=4,
    output_path="video.mp4",
):
    """Generate a short text-to-video clip chunk-by-chunk and save it as MP4.

    Follows the diffusers TextToVideoZero chunked-generation recipe: frames
    are produced in overlapping chunks, each chunk prepends frame 0 so the
    pipeline's cross-frame attention can anchor to it, and the generator is
    re-seeded before every chunk for temporal consistency.

    Parameters
    ----------
    prompt : str
        Text prompt describing the video.
    model_id : str
        Hugging Face model id for the Stable Diffusion base checkpoint.
    video_length : int
        Total number of frames to generate.
    chunk_size : int
        Frames processed per pipeline call (including the attached frame 0).
    seed : int
        RNG seed, reused for every chunk.
    fps : int
        Frame rate of the saved video.
    output_path : str
        Destination file for the rendered MP4.

    Returns
    -------
    str
        The path the video was written to.
    """
    # NOTE(review): requires a CUDA device — no CPU fallback is attempted.
    pipe = TextToVideoZeroPipeline.from_pretrained(
        model_id, torch_dtype=torch.float16
    ).to("cuda")
    generator = torch.Generator(device="cuda")

    # Step is chunk_size - 1 because frame 0 is prepended to every chunk
    # below, so each call still generates at most chunk_size frames.
    chunk_ids = np.arange(0, video_length, chunk_size - 1)

    frames = []
    for i in range(len(chunk_ids)):
        print(f"Processing chunk {i + 1} / {len(chunk_ids)}")
        ch_start = chunk_ids[i]
        # Last chunk runs through to the end of the video.
        ch_end = video_length if i == len(chunk_ids) - 1 else chunk_ids[i + 1]
        # Attach the first frame for Cross Frame Attention.
        frame_ids = [0] + list(range(ch_start, ch_end))
        # Fix the seed for temporal consistency across chunks.
        generator.manual_seed(seed)
        output = pipe(
            prompt=prompt,
            video_length=len(frame_ids),
            generator=generator,
            frame_ids=frame_ids,
        )
        # Drop the attached frame 0 so it is not duplicated in the result.
        frames.append(output.images[1:])

    # Concatenate chunks, convert float [0, 1] frames to uint8, and save.
    result = np.concatenate(frames)
    result = [(r * 255).astype("uint8") for r in result]
    imageio.mimsave(output_path, result, fps=fps)
    return output_path


# Script entry point: run with the original example's defaults.
generate_video()