# VideoCrafter / app.py
import os
import sys
import gradio as gr
# from demo_test import Text2Video, VideoControl
from videocrafter_test import Text2Video
from videocontrol_test import VideoControl
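# add the local lvdm package to the import path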
sys.path.insert(1, os.path.join(sys.path[0], 'lvdm'))
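# Text2Video examples: [prompt, sampling steps, model, eta, cfg scale, lora scale]
# (same order as the `inputs` list passed to gr.Examples in the Text2Video tab)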
t2v_examples = [
['an elephant is walking under the sea, 4K, high definition',50,'origin',1,15,1,],
['an astronaut riding a horse in outer space',25,'origin',1,15,1,],
['a monkey is playing a piano',25,'vangogh',1,15,1,],
['A fire is burning on a candle',25,'frozen',1,15,1,],
['a horse is drinking in the river',25,'yourname',1,15,1,],
['Robot dancing in times square',25,'coco',1,15,1,],
]
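# VideoControl example: [input video, prompt, frame stride, sampling steps, cfg scale, eta, frame count, long-side resolution]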
control_examples = [
['input/flamingo.mp4', 'An ostrich walking in the desert, photorealistic, 4k', 0, 50, 15, 1, 16, 256]
]
def videocrafter_demo(result_dir='./tmp/'):
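    """Build the Gradio Blocks demo with a Text2Video tab and a VideoControl tab."""
    # Text2Video / VideoControl wrap the generation pipelines and write their outputs under result_dir.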
text2video = Text2Video(result_dir)
videocontrol = VideoControl(result_dir)
with gr.Blocks(analytics_enabled=False) as videocrafter_iface:
gr.Markdown("<div align='center'> <h2> VideoCrafter: A Toolkit for Text-to-Video Generation and Editing </span> </h2> \
<a style='font-size:18px;color: #000000' href='https://github.com/VideoCrafter/VideoCrafter'> Github </div>")
gr.Markdown("<b> You may duplicate the space and upgrade to GPU in settings for better performance and faster inference without waiting in the queue. <a style='display:inline-block' href='https://huggingface.co/spaces/VideoCrafter/VideoCrafter?duplicate=true'> <img src='https://bit.ly/3gLdBN6' alt='Duplicate Space'></a> </b>")
#######t2v#######
with gr.Tab(label="Text2Video"):
with gr.Column():
with gr.Row().style(equal_height=False):
with gr.Column():
input_text = gr.Text(label='Prompts')
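                        # selectable checkpoints: 'origin' (base model, default) plus style-specific LoRA variants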
                        model_choices = ['origin', 'vangogh', 'frozen', 'yourname', 'coco']
                        with gr.Row():
                            model_index = gr.Dropdown(label='Models', elem_id="model", choices=model_choices, value=model_choices[0], type="index", interactive=True)
with gr.Row():
                            steps = gr.Slider(minimum=1, maximum=60, step=1, elem_id="steps", label="Sampling steps", value=50)
eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="eta")
with gr.Row():
lora_scale = gr.Slider(minimum=0.0, maximum=2.0, step=0.1, label='Lora Scale', value=1.0, elem_id="lora_scale")
cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=15.0, elem_id="cfg_scale")
send_btn = gr.Button("Send")
                    with gr.Tab(label='Result'):
output_video_1 = gr.Video().style(width=384)
gr.Examples(examples=t2v_examples,
inputs=[input_text,steps,model_index,eta,cfg_scale,lora_scale],
outputs=[output_video_1],
fn=text2video.get_prompt,
cache_examples=False)
#cache_examples=os.getenv('SYSTEM') == 'spaces')
send_btn.click(
fn=text2video.get_prompt,
inputs=[input_text,steps,model_index,eta,cfg_scale,lora_scale,],
outputs=[output_video_1],
)
#######videocontrol######
with gr.Tab(label='VideoControl'):
with gr.Column():
with gr.Row():
# with gr.Tab(label='input'):
with gr.Column():
with gr.Row():
vc_input_video = gr.Video(label="Input Video").style(width=256)
vc_origin_video = gr.Video(label='Center-cropped Video').style(width=256)
with gr.Row():
vc_input_text = gr.Text(label='Prompts')
with gr.Row():
vc_eta = gr.Slider(minimum=0.0, maximum=1.0, step=0.1, label='ETA', value=1.0, elem_id="vc_eta")
vc_cfg_scale = gr.Slider(minimum=1.0, maximum=30.0, step=0.5, label='CFG Scale', value=15.0, elem_id="vc_cfg_scale")
with gr.Row():
vc_steps = gr.Slider(minimum=1, maximum=60, step=1, elem_id="vc_steps", label="Sampling steps", value=50)
                            frame_stride = gr.Slider(minimum=0, maximum=100, step=1, label='Frame Stride', value=0, elem_id="vc_frame_stride")
with gr.Row():
                            resolution = gr.Slider(minimum=128, maximum=512, step=8, label='Long Side Resolution', value=256, elem_id="vc_resolution")
                            video_frames = gr.Slider(minimum=8, maximum=64, step=1, label='Video Frame Num', value=16, elem_id="vc_video_frames")
vc_end_btn = gr.Button("Send")
with gr.Tab(label='Result'):
vc_output_info = gr.Text(label='Info')
with gr.Row():
vc_depth_video = gr.Video(label="Depth Video").style(width=256)
vc_output_video = gr.Video(label="Generated Video").style(width=256)
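                # example results are cached only when running on HF Spaces (SYSTEM env var set to 'spaces')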
gr.Examples(examples=control_examples,
inputs=[vc_input_video, vc_input_text, frame_stride, vc_steps, vc_cfg_scale, vc_eta, video_frames, resolution],
outputs=[vc_output_info, vc_origin_video, vc_depth_video, vc_output_video],
fn = videocontrol.get_video,
cache_examples=os.getenv('SYSTEM') == 'spaces',
)
vc_end_btn.click(inputs=[vc_input_video, vc_input_text, frame_stride, vc_steps, vc_cfg_scale, vc_eta, video_frames, resolution],
outputs=[vc_output_info, vc_origin_video, vc_depth_video, vc_output_video],
fn = videocontrol.get_video
)
return videocrafter_iface
if __name__ == "__main__":
result_dir = os.path.join('./', 'results')
videocrafter_iface = videocrafter_demo(result_dir)
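    # run one generation at a time, with up to 10 requests waiting in the queue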
videocrafter_iface.queue(concurrency_count=1, max_size=10)
videocrafter_iface.launch(share=True)
# videocrafter_iface.launch(server_name='0.0.0.0', server_port=80)