zejunyang committed on
Commit
d61d34c
1 Parent(s): aeec601
Files changed (1) hide show
  1. app.py +83 -0
app.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
"""Gradio demo app for AniPortrait.

Builds a two-tab Blocks UI:
  * "Audio2video"  — drive a portrait animation from an input audio clip
    (optionally with a head-pose reference video) via ``audio2video``.
  * "Video2video"  — re-enact a source video on a reference image via
    ``video2video``.

The "Translate to realistic style" and "Background restoration" buttons are
present in the layout but their callbacks are currently disabled (commented
out) pending the ``sd_img2real`` / SAM integrations.
"""

import gradio as gr

from src.audio2vid import audio2video
from src.vid2vid import video2video

title = r"""
<h1>AniPortrait</h1>
"""

description = r"""
<b>Official 🤗 Gradio demo</b> for <a href='https://github.com/Zejun-Yang/AniPortrait' target='_blank'><b>AniPortrait: Audio-Driven Synthesis of Photorealistic Portrait Animations</b></a>.<br>
"""

with gr.Blocks() as demo:

    gr.Markdown(title)
    gr.Markdown(description)

    # --- Tab 1: generate a talking-head video from audio + reference image ---
    with gr.Tab("Audio2video"):
        with gr.Column():
            with gr.Row():
                a2v_input_audio = gr.Audio(sources=["upload", "microphone"], type="filepath", editable=True, label="Input audio", interactive=True)
                with gr.Column():
                    a2v_ref_img = gr.Image(label="Upload reference image", sources="upload")
                    a2v_img_trans_real_button = gr.Button("Translate to realistic style")
                a2v_headpose_video = gr.Video(label="Option: upload head pose reference video", sources="upload")

            with gr.Row():
                # Size is applied to both -W and -H (square output); step=8 keeps
                # dimensions divisible by 8 as the diffusion model requires.
                a2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
                a2v_step_slider = gr.Slider(minimum=5, maximum=50, value=25, label="Steps (--steps)")

            with gr.Row():
                a2v_length = gr.Number(value=150, label="Length (-L) (Set 0 to automatically calculate video length.)")
                a2v_seed = gr.Number(value=42, label="Seed (--seed)")

            a2v_button = gr.Button("Generate", variant="primary")
            a2v_output_video = gr.PlayableVideo(label="Result", interactive=False)

    # --- Tab 2: re-enact a source video's motion on a reference image ---
    with gr.Tab("Video2video"):
        with gr.Column():
            with gr.Row():
                with gr.Column():
                    v2v_ref_img = gr.Image(label="Upload reference image", sources="upload")
                    v2v_img_trans_real_button = gr.Button("Translate to realistic style")
                v2v_source_video = gr.Video(label="Upload source video", sources="upload")

            with gr.Row():
                v2v_size_slider = gr.Slider(minimum=256, maximum=1024, step=8, value=512, label="Video size (-W & -H)")
                v2v_step_slider = gr.Slider(minimum=5, maximum=50, value=25, label="Steps (--steps)")

            with gr.Row():
                v2v_length = gr.Number(value=150, label="Length (-L) (Set 0 to automatically calculate video length.)")
                v2v_seed = gr.Number(value=42, label="Seed (--seed)")

            v2v_button = gr.Button("Generate", variant="primary")
            v2v_output_video = gr.PlayableVideo(label="Result", interactive=False)
            v2v_bg_restore_button = gr.Button("Background restoration with SAM")

    # Wire the Generate buttons to their backends. Argument order must match
    # the audio2video / video2video signatures.
    a2v_button.click(
        fn=audio2video,
        inputs=[a2v_input_audio, a2v_ref_img, a2v_headpose_video,
                a2v_size_slider, a2v_step_slider, a2v_length, a2v_seed],
        outputs=[a2v_output_video]
    )
    # TODO: enable once sd_img2real is available.
    # a2v_img_trans_real_button.click(
    #     fn=sd_img2real,
    #     inputs=[a2v_ref_img],
    #     outputs=[a2v_ref_img]
    # )
    v2v_button.click(
        fn=video2video,
        inputs=[v2v_ref_img, v2v_source_video,
                v2v_size_slider, v2v_step_slider, v2v_length, v2v_seed],
        outputs=[v2v_output_video]
    )
    # TODO: enable once sd_img2real is available.
    # v2v_img_trans_real_button.click(
    #     fn=sd_img2real,
    #     inputs=[v2v_ref_img],
    #     outputs=[v2v_ref_img]
    # )

# Guard the launch so the module can be imported (e.g. for tests or mounting
# inside another FastAPI/Gradio app) without starting a server.
if __name__ == "__main__":
    demo.launch()