import os

import gradio as gr
from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys

# Load the I2VGen-XL image-to-video pipeline once at startup on the first GPU.
image_to_video_pipe = pipeline(task="image-to-video", model='damo/i2vgen-xl', model_revision='v1.1.4', device='cuda:0')

# Resolve the directory containing this script; the bundled example images are located relative to it.
current_directory = os.path.dirname(os.path.realpath(__file__))
print("Script directory:", current_directory)


def upload_file(file):
    # Return the temporary path of the uploaded file so it can be shown in the image component.
    return file.name


def image_to_video(image_in, text_in):
    if image_in is None:
        raise gr.Error('Please upload the image or wait for the image upload to complete / 请上传图片或等待图片上传完成')
    print(image_in)
    # Run the pipeline on the input image and caption; it returns the path of the generated video file.
    output_video_path = image_to_video_pipe(image_in, caption=text_in)[OutputKeys.OUTPUT_VIDEO]
    print(output_video_path)
    return output_video_path


with gr.Blocks() as demo:
    gr.Markdown(
        """
I2VGen-XL
I2VGen-XL可以根据用户输入的静态图像和文本生成目标接近、语义相同的视频,生成的视频具有高清(1280 * 720)、宽屏(16:9)、时序连贯、质感好等特点。
I2VGen-XL can generate a video whose content and semantics closely match a user-provided static image and text prompt. The generated videos are high-definition (1280 * 720), widescreen (16:9), temporally coherent, and of good visual quality.
        """
    )
    with gr.Box():
        gr.Markdown(
            """选择合适的图片进行上传,并补充对视频内容的英文文本描述,然后点击“生成视频”。
            Please choose an image to upload (we recommend an image size of 1280 * 720), provide an English text description of the video you wish to create, and then click "Generate Video" to receive the generated video."""
        )
        with gr.Row():
            with gr.Column():
                text_in = gr.Textbox(label="Text description/文本描述", lines=2, elem_id="text-in")
                image_in = gr.Image(label="Image Input/图片输入", type="filepath", interactive=False, elem_id="image-in", height=300)
                with gr.Row():
                    upload_image = gr.UploadButton("Upload Image/上传图片", file_types=["image"], file_count="single")
                    image_submit = gr.Button("Generate video/生成视频🎬")
            with gr.Column():
                video_out_1 = gr.Video(label='Generated Video/生成的视频', elem_id='video-out_1', interactive=False, height=300)
        gr.Markdown(
            """注:如果生成的视频无法播放,请尝试升级浏览器或使用chrome浏览器。
            Note: If the generated video cannot be played, please try upgrading your browser or use the Chrome browser."""
        )

    # Wire up the UI: the upload button fills the image component, and the generate button runs the pipeline.
    upload_image.upload(upload_file, upload_image, image_in, queue=False)
    image_submit.click(fn=image_to_video, inputs=[image_in, text_in], outputs=[video_out_1])

    with gr.Row(variant="panel"):
        gr.Examples(
            examples=[
                [os.path.join(current_directory, 'example_images/sample2.png'), 'A girl with yellow hair and black clothes stood in front of the camera'],
                [os.path.join(current_directory, 'example_images/sample13.png'), 'A girl in hat and sunglasses'],
                [os.path.join(current_directory, 'example_images/sample6.png'), 'Papers were floating in the air on a table in the library'],
                [os.path.join(current_directory, 'example_images/sample12.jpg'), 'Night sky lit with milky way galaxy'],
                [os.path.join(current_directory, 'example_images/sample1.png'), 'A blonde girl in jeans'],
                [os.path.join(current_directory, 'example_images/sample11.jpg'), 'A wet road between the woods'],
                [os.path.join(current_directory, 'example_images/sample5.png'), 'A painting of a city street with a giant monster'],
                [os.path.join(current_directory, 'example_images/sample7.png'), 'A red woodcut bird'],
                [os.path.join(current_directory, 'example_images/sample8.jpg'), 'A green frog floats on the surface of the water on green lotus leaves, with several pink lotus flowers, in a Chinese painting style.'],
            ],
            inputs=[image_in, text_in],
            outputs=[video_out_1],
            fn=image_to_video,
            cache_examples=True,
            examples_per_page=5,
            label='Examples',
        )

demo.queue(status_update_rate=1, api_open=False).launch(share=False, show_error=True)
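
# For reference, a minimal sketch of invoking the same ModelScope pipeline directly,
# without the Gradio UI. The input path 'test.png' and the caption are placeholder
# assumptions, not assets shipped with this demo; uncomment to try it in a Python
# session with modelscope installed and a CUDA GPU available.
#
# from modelscope.pipelines import pipeline
# from modelscope.outputs import OutputKeys
#
# pipe = pipeline(task="image-to-video", model='damo/i2vgen-xl',
#                 model_revision='v1.1.4', device='cuda:0')
# video_path = pipe('test.png', caption='A red woodcut bird')[OutputKeys.OUTPUT_VIDEO]
# print(video_path)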