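"""Gradio demo app for AniPortrait.

Two tabs are exposed:
  * Audio2video -- animate a reference portrait image from an input audio clip,
    with an optional head-pose reference video.
  * Video2video -- reenact a reference portrait image using the motion of an
    uploaded source video.

Both tabs share the size / steps / length / seed controls and call into the
project's ``audio2video`` and ``video2video`` pipelines.
"""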
import gradio as gr

from src.audio2vid import audio2video
from src.vid2vid import video2video
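# Note: audio2video / video2video are assumed to return the generated video
# (e.g. a file path) so Gradio can display it in the PlayableVideo output
# components wired up below.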

title = r"""
<h1>AniPortrait</h1>
"""
description = r"""
<b>Official 🤗 Gradio demo</b> for <a href='https://github.com/Zejun-Yang/AniPortrait' target='_blank'><b>AniPortrait: Audio-Driven Synthesis of Photorealistic Portrait Animations</b></a>.<br>
"""
with gr.Blocks() as demo:
    gr.Markdown(title)
    gr.Markdown(description)

    with gr.Tab("Audio2video"):
        with gr.Column():
            with gr.Row():
                a2v_input_audio = gr.Audio(sources=["upload", "microphone"], type="filepath", editable=True, label="Input audio", interactive=True)
                with gr.Column():
                    a2v_ref_img = gr.Image(label="Upload reference image", sources="upload")
                    a2v_img_trans_real_botton = gr.Button("Translate to realistic style")
                a2v_headpose_video = gr.Video(label="Option: upload head pose reference video", sources="upload")

            with gr.Row():
                a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
                a2v_step_slider = gr.Slider(minimum=5, maximum=50, value=25, label="Steps (--steps)")

            with gr.Row():
                a2v_length = gr.Number(value=150, label="Length (-L) (Set 0 to automatically calculate video length.)")
                a2v_seed = gr.Number(value=42, label="Seed (--seed)")

            a2v_botton = gr.Button("Generate", variant="primary")
            a2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
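    # Video2video tab: same controls as above, but the motion comes from an
    # uploaded source video instead of an audio clip.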
with gr.Tab("Video2video"): | |
with gr.Column(): | |
with gr.Row(): | |
with gr.Column(): | |
v2v_ref_img = gr.Image(label="Upload reference image", sources="upload") | |
v2v_img_trans_real_botton = gr.Button("Translate to realistic style") | |
v2v_source_video = gr.Video(label="Upload source video", sources="upload") | |
with gr.Row(): | |
v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)") | |
v2v_step_slider = gr.Slider(minimum=5, maximum=50, value=25, label="Steps (--steps)") | |
with gr.Row(): | |
v2v_length = gr.Number(value=150, label="Length (-L) (Set 0 to automatically calculate video length.)") | |
v2v_seed = gr.Number(value=42, label="Seed (--seed)") | |
v2v_botton = gr.Button("Generate", variant="primary") | |
v2v_output_video = gr.PlayableVideo(label="Result", interactive=False) | |
v2v_bg_restore_botton = gr.Button("Background restoration with SAM") | |
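
    # Event wiring: each Generate button runs its pipeline and writes the
    # result into the corresponding output video component.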
    a2v_botton.click(
        fn=audio2video,
        inputs=[a2v_input_audio, a2v_ref_img, a2v_headpose_video,
                a2v_size_slider, a2v_step_slider, a2v_length, a2v_seed],
        outputs=[a2v_output_video]
    )
    # a2v_img_trans_real_botton.click(
    #     fn=sd_img2real,
    #     inputs=[a2v_ref_img],
    #     outputs=[a2v_ref_img]
    # )
    v2v_botton.click(
        fn=video2video,
        inputs=[v2v_ref_img, v2v_source_video,
                v2v_size_slider, v2v_step_slider, v2v_length, v2v_seed],
        outputs=[v2v_output_video]
    )
    # v2v_img_trans_real_botton.click(
    #     fn=sd_img2real,
    #     inputs=[v2v_ref_img],
    #     outputs=[v2v_ref_img]
    # )
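    # The "Translate to realistic style" and "Background restoration with SAM"
    # buttons have no click handlers in this demo; the sd_img2real wiring is
    # left commented out above.
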
demo.launch()