Commit 62f2ed3 (parent: cb9ebd7) — "Update app.py" [Browse files]
File changed: app.py (CHANGED) — 1 file
@@ -57,7 +57,7 @@ def sadtalker_demo():
|
|
57 |
|
58 |
with gr.Tabs(elem_id="sadtalker_driven_audio"):
|
59 |
with gr.TabItem('Driving Methods'):
|
60 |
-
gr.Markdown("
|
61 |
|
62 |
with gr.Row():
|
63 |
driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath")
|
@@ -73,7 +73,7 @@ def sadtalker_demo():
|
|
73 |
|
74 |
with gr.Column():
|
75 |
use_ref_video = gr.Checkbox(label="Use Reference Video")
|
76 |
-
ref_info = gr.Radio(['pose', 'blink','pose+blink', 'all'], value='pose', label='Reference Video',info="
|
77 |
|
78 |
ref_video.change(ref_video_fn, inputs=ref_video, outputs=[use_ref_video]) # todo
|
79 |
|
@@ -81,7 +81,7 @@ def sadtalker_demo():
|
|
81 |
with gr.Column(variant='panel'):
|
82 |
with gr.Tabs(elem_id="sadtalker_checkbox"):
|
83 |
with gr.TabItem('Settings'):
|
84 |
-
gr.Markdown("
|
85 |
with gr.Column(variant='panel'):
|
86 |
# width = gr.Slider(minimum=64, elem_id="img2img_width", maximum=2048, step=8, label="Manually Crop Width", value=512) # img2img_width
|
87 |
# height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_width
|
@@ -91,12 +91,12 @@ def sadtalker_demo():
|
|
91 |
blink_every = gr.Checkbox(label="use eye blink", value=True)
|
92 |
|
93 |
with gr.Row():
|
94 |
-
size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="
|
95 |
-
preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="
|
96 |
|
97 |
with gr.Row():
|
98 |
is_still_mode = gr.Checkbox(label="Still Mode (fewer head motion, works with preprocess `full`)")
|
99 |
-
facerender = gr.Radio(['facevid2vid','pirender'], value='facevid2vid', label='facerender', info="
|
100 |
|
101 |
with gr.Row():
|
102 |
batch_size = gr.Slider(label="batch size in generation", step=1, maximum=10, value=1)
|
|
|
57 |
|
58 |
with gr.Tabs(elem_id="sadtalker_driven_audio"):
|
59 |
with gr.TabItem('Driving Methods'):
|
60 |
+
gr.Markdown("")
|
61 |
|
62 |
with gr.Row():
|
63 |
driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath")
|
|
|
73 |
|
74 |
with gr.Column():
|
75 |
use_ref_video = gr.Checkbox(label="Use Reference Video")
|
76 |
+
ref_info = gr.Radio(['pose', 'blink','pose+blink', 'all'], value='pose', label='Reference Video',info="")
|
77 |
|
78 |
ref_video.change(ref_video_fn, inputs=ref_video, outputs=[use_ref_video]) # todo
|
79 |
|
|
|
81 |
with gr.Column(variant='panel'):
|
82 |
with gr.Tabs(elem_id="sadtalker_checkbox"):
|
83 |
with gr.TabItem('Settings'):
|
84 |
+
gr.Markdown("")
|
85 |
with gr.Column(variant='panel'):
|
86 |
# width = gr.Slider(minimum=64, elem_id="img2img_width", maximum=2048, step=8, label="Manually Crop Width", value=512) # img2img_width
|
87 |
# height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_width
|
|
|
91 |
blink_every = gr.Checkbox(label="use eye blink", value=True)
|
92 |
|
93 |
with gr.Row():
|
94 |
+
size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="") #
|
95 |
+
preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="")
|
96 |
|
97 |
with gr.Row():
|
98 |
is_still_mode = gr.Checkbox(label="Still Mode (fewer head motion, works with preprocess `full`)")
|
99 |
+
facerender = gr.Radio(['facevid2vid','pirender'], value='facevid2vid', label='facerender', info="")
|
100 |
|
101 |
with gr.Row():
|
102 |
batch_size = gr.Slider(label="batch size in generation", step=1, maximum=10, value=1)
|