DmitrMakeev committed on
Commit 62f2ed3
1 Parent(s): cb9ebd7

Update app.py

Files changed (1): app.py +6 -6
app.py CHANGED
@@ -57,7 +57,7 @@ def sadtalker_demo():
 
                 with gr.Tabs(elem_id="sadtalker_driven_audio"):
                     with gr.TabItem('Driving Methods'):
-                        gr.Markdown("Possible driving combinations: <br> 1. Audio only 2. Audio/IDLE Mode + Ref Video(pose, blink, pose+blink) 3. IDLE Mode only 4. Ref Video only (all) ")
+                        gr.Markdown("")
 
                         with gr.Row():
                             driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath")
@@ -73,7 +73,7 @@ def sadtalker_demo():
 
                             with gr.Column():
                                 use_ref_video = gr.Checkbox(label="Use Reference Video")
-                                ref_info = gr.Radio(['pose', 'blink','pose+blink', 'all'], value='pose', label='Reference Video',info="How to borrow from reference Video?((fully transfer, aka, video driving mode))")
+                                ref_info = gr.Radio(['pose', 'blink','pose+blink', 'all'], value='pose', label='Reference Video',info="")
 
                         ref_video.change(ref_video_fn, inputs=ref_video, outputs=[use_ref_video]) # todo
 
@@ -81,7 +81,7 @@ def sadtalker_demo():
             with gr.Column(variant='panel'):
                 with gr.Tabs(elem_id="sadtalker_checkbox"):
                     with gr.TabItem('Settings'):
-                        gr.Markdown("need help? please visit our [[best practice page](https://github.com/OpenTalker/SadTalker/blob/main/docs/best_practice.md)] for more detials")
+                        gr.Markdown("")
                         with gr.Column(variant='panel'):
                             # width = gr.Slider(minimum=64, elem_id="img2img_width", maximum=2048, step=8, label="Manually Crop Width", value=512) # img2img_width
                             # height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_width
@@ -91,12 +91,12 @@ def sadtalker_demo():
                                 blink_every = gr.Checkbox(label="use eye blink", value=True)
 
                             with gr.Row():
-                                size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="use 256/512 model?") #
-                                preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="How to handle input image?")
+                                size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="") #
+                                preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="")
 
                             with gr.Row():
                                 is_still_mode = gr.Checkbox(label="Still Mode (fewer head motion, works with preprocess `full`)")
-                                facerender = gr.Radio(['facevid2vid','pirender'], value='facevid2vid', label='facerender', info="which face render?")
+                                facerender = gr.Radio(['facevid2vid','pirender'], value='facevid2vid', label='facerender', info="")
 
                             with gr.Row():
                                 batch_size = gr.Slider(label="batch size in generation", step=1, maximum=10, value=1)
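
For context, the components this commit touches are standard Gradio widgets; the change replaces their helper strings (`gr.Markdown` contents and the `info=` hints on `gr.Radio`) with empty strings. The following standalone sketch is an illustration, not code from the repository; it assumes Gradio 3.x, where `gr.Audio(source=...)` and the `info=` keyword on `gr.Radio` are available, and shows the same pattern in isolation.

```python
import gradio as gr

# Minimal sketch of the UI pattern the diff edits: a Markdown block and a Radio
# component whose helper text has been blanked out with empty strings.
with gr.Blocks() as demo:
    with gr.Tabs(elem_id="sadtalker_driven_audio"):
        with gr.TabItem('Driving Methods'):
            gr.Markdown("")  # empty string: no guidance text is rendered above the inputs
            with gr.Row():
                driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath")
                ref_info = gr.Radio(['pose', 'blink', 'pose+blink', 'all'], value='pose',
                                    label='Reference Video', info="")  # info="": no hint shown under the label

if __name__ == "__main__":
    demo.launch()
```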