import os
import gradio as gr
from modelscope.pipelines import pipeline
from modelscope.outputs import OutputKeys
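
# Load the I2VGen-XL image-to-video pipeline once at startup; this downloads the
# model on first run and keeps it resident on the GPU for all requests.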
image_to_video_pipe = pipeline(task="image-to-video", model='damo/i2vgen-xl', model_revision='v1.1.4', device='cuda:0')
# Resolve the directory containing this script (used to locate the example images)
current_directory = os.path.dirname(os.path.realpath(__file__))
print("Script directory:", current_directory)
def upload_file(file):
    # Forward the uploaded file's path to the read-only image component.
    return file.name
def image_to_video(image_in, text_in):
    if image_in is None:
        raise gr.Error('Please upload an image, or wait for the upload to complete.')
    print('Input image:', image_in)
    # The pipeline returns a dict; OutputKeys.OUTPUT_VIDEO holds the path of the rendered video file.
    output_video_path = image_to_video_pipe(image_in, caption=text_in)[OutputKeys.OUTPUT_VIDEO]
    print('Output video:', output_video_path)
    return output_video_path
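
# Build the Gradio UI: text and image inputs on the left, generated video on the right.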
with gr.Blocks() as demo:
    gr.Markdown(
        """
        # COLIPIC.AI-XLv2
        SRI LANKA

        COLIPIC.AI-XLv2 generates a video whose content and semantics follow a static image and a text prompt supplied by the user. The generated videos are high-definition (1280x720), widescreen (16:9), temporally coherent, and finely textured.
        """
    )
    with gr.Box():
        gr.Markdown(
            """
            Choose an image to upload (we recommend a size of 1280x720), provide an English text description of the video you wish to create, and then click "Generate Video" to receive the generated video.
            """
        )
        with gr.Row():
            with gr.Column():
                text_in = gr.Textbox(label="Text description", lines=2, elem_id="text-in")
                image_in = gr.Image(label="Image Input", type="filepath", interactive=False, elem_id="image-in", height=300)
                with gr.Row():
                    upload_image = gr.UploadButton("Upload Image", file_types=["image"], file_count="single")
                    image_submit = gr.Button("Generate Video")
            with gr.Column():
                video_out_1 = gr.Video(label='Generated Video', elem_id='video-out_1', interactive=False, height=300)
gr.Markdown("""
Note: If the generated video cannot be played, please try to upgrade your browser or use the Chrome browser."""
)
    # Wire up the events: the upload button fills the image component, and the
    # submit button runs the generation pipeline.
    upload_image.upload(upload_file, upload_image, image_in, queue=False)
    image_submit.click(fn=image_to_video, inputs=[image_in, text_in], outputs=[video_out_1])
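    # Example image/prompt pairs; with cache_examples=True their outputs are
    # precomputed once, so clicking an example returns the cached video.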
    with gr.Row(variant="panel"):
        gr.Examples(
            examples=[
                [os.path.join(current_directory, 'example_images/sample2.png'), 'A girl with yellow hair and black clothes stood in front of the camera'],
                [os.path.join(current_directory, 'example_images/sample13.png'), 'A girl in hat and sunglasses'],
                [os.path.join(current_directory, 'example_images/sample6.png'), 'Papers were floating in the air on a table in the library'],
                [os.path.join(current_directory, 'example_images/sample12.jpg'), 'Night sky lit with milky way galaxy'],
                [os.path.join(current_directory, 'example_images/sample1.png'), 'A blonde girl in jeans'],
                [os.path.join(current_directory, 'example_images/sample11.jpg'), 'A wet road between the woods'],
                [os.path.join(current_directory, 'example_images/sample5.png'), 'A painting of a city street with a giant monster'],
                [os.path.join(current_directory, 'example_images/sample7.png'), 'A red woodcut bird'],
                [os.path.join(current_directory, 'example_images/sample8.jpg'), 'A green frog floats on the surface of the water on green lotus leaves, with several pink lotus flowers, in a Chinese painting style.']
            ],
            inputs=[image_in, text_in],
            outputs=[video_out_1],
            fn=image_to_video,
            cache_examples=True,
            examples_per_page=5,
            label='Examples',
        )
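
# Queue requests so long-running generations do not time out; keep the API closed
# and do not create a public share link.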
demo.queue(status_update_rate=1, api_open=False).launch(share=False, show_error=True)