#!/usr/bin/env python
"""Gradio demo for I2VGen-XL: image -> video, then optional video -> high-resolution video."""

import os
import pathlib
import tempfile

import gradio as gr
import torch
from huggingface_hub import snapshot_download
from modelscope.pipelines import pipeline

DESCRIPTION = """# I2VGen-XL

I2VGen-XL can generate videos that are semantically consistent with the input image and text. The generated videos are high-definition (1280x720), wide-screen (16:9), temporally coherent, and rich in texture detail.
"""

if torch.cuda.is_available():
    # Download both model checkpoints into a local cache and build the two
    # modelscope pipelines on the first GPU.
    model_cache_dir = os.getenv("MODEL_CACHE_DIR", "./models")

    image2video_model_dir = pathlib.Path(model_cache_dir) / "MS-Image2Video"
    snapshot_download(repo_id="damo-vilab/MS-Image2Video", repo_type="model", local_dir=image2video_model_dir)
    image_to_video_pipe = pipeline(
        task="image-to-video",
        model=image2video_model_dir.as_posix(),
        model_revision="v1.1.0",
        device="cuda:0",
    )

    video2video_model_dir = pathlib.Path(model_cache_dir) / "MS-Vid2Vid-XL"
    snapshot_download(repo_id="damo-vilab/MS-Vid2Vid-XL", repo_type="model", local_dir=video2video_model_dir)
    video_to_video_pipe = pipeline(
        task="video-to-video",
        model=video2video_model_dir.as_posix(),
        model_revision="v1.1.0",
        device="cuda:0",
    )
else:
    # Without a GPU the pipelines cannot run; leave them unset so the
    # handlers below fail with a clear error instead of a TypeError.
    image_to_video_pipe = None
    video_to_video_pipe = None


def image_to_video(image_path: str) -> str:
    """Generate a video from a single input image and return its file path."""
    if image_to_video_pipe is None:
        raise gr.Error("This demo requires a CUDA-capable GPU.")
    output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    output_file.close()  # The pipeline writes the video to this path itself.
    image_to_video_pipe(image_path, output_video=output_file.name)
    return output_file.name


def video_to_video(video_path: str, text: str) -> str:
    """Upscale a generated video, guided by an English text description."""
    if video_to_video_pipe is None:
        raise gr.Error("This demo requires a CUDA-capable GPU.")
    p_input = {"video_path": video_path, "text": text}
    output_file = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False)
    output_file.close()  # The pipeline writes the video to this path itself.
    video_to_video_pipe(p_input, output_video=output_file.name)
    return output_file.name


with gr.Blocks(css="style.css") as demo:
    gr.Markdown(DESCRIPTION)
    with gr.Box():
        gr.Markdown('Step 1: Upload an image and click the "Generate video" button.')
        with gr.Row():
            with gr.Column():
                input_image = gr.Image(label="Input image", type="filepath", height=300)
                i2v_button = gr.Button("Generate video")
            with gr.Column():
                output_video_1 = gr.Video(label="Output video 1", interactive=False, height=300)
    with gr.Box():
        gr.Markdown(
            'Step 2: Add an English text description of the video content and click the '
            '"Generate high-resolution video" button.'
        )
        with gr.Row():
            with gr.Column():
                text_description = gr.Textbox(label="Text description")
                v2v_button = gr.Button("Generate high-resolution video")
            with gr.Column():
                output_video_2 = gr.Video(label="Output video 2", height=300)

    # Step 1: image -> low-resolution video.
    i2v_button.click(
        fn=image_to_video,
        inputs=input_image,
        outputs=output_video_1,
        api_name="image-to-video",
    )
    # Step 2: the video from step 1 plus a text prompt -> high-resolution video.
    v2v_button.click(
        fn=video_to_video,
        inputs=[output_video_1, text_description],
        outputs=output_video_2,
        api_name="video-to-video",
    )

if __name__ == "__main__":
    demo.queue(max_size=10, api_open=False).launch()
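
# A minimal client-side sketch (not part of the demo itself) of driving both
# steps programmatically with gradio_client, assuming the app is running
# locally on the default port. Note that `api_open=False` above may block
# API access on some deployments, and newer gradio_client versions expect
# `handle_file(...)` for image inputs; "cat.jpg" and the prompt text are
# hypothetical placeholders:
#
#     from gradio_client import Client
#
#     client = Client("http://127.0.0.1:7860/")
#     low_res_video = client.predict("cat.jpg", api_name="/image-to-video")
#     high_res_video = client.predict(low_res_video, "A cat walking in the garden",
#                                     api_name="/video-to-video")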