nightfury committed on
Commit
bcc0dc1
1 Parent(s): fae9adb

Delete appp.py

Browse files
Files changed (1) hide show
  1. appp.py +0 -223
appp.py DELETED
@@ -1,223 +0,0 @@
1
- import os, sys
2
- import tempfile
3
- import gradio as gr
4
- from src.gradio_demo import SadTalker
5
- # from src.utils.text2speech import TTSTalker
6
- from huggingface_hub import snapshot_download
7
-
8
def get_source_image(image):
    """Return the uploaded source image unchanged (identity pass-through callback)."""
    selected = image
    return selected
10
-
11
# Detect whether we are running embedded inside the stable-diffusion-webui
# process: the `webui` module is only importable there.
try:
    import webui  # in webui
    in_webui = True
except ImportError:
    # Standalone launch. Catch only the import failure: the original bare
    # `except:` would also have swallowed SystemExit/KeyboardInterrupt.
    in_webui = False
16
-
17
-
18
def toggle_audio_file(choice):
    """Swap visibility between the audio-upload widget and the IDLE-mode widget.

    Args:
        choice: State of the "Use Idle Animation" checkbox — assumed to be a
            bool delivered by gr.Checkbox (TODO confirm against caller).

    Returns:
        Two ``gr.update`` objects: visibility for (audio input, idle input).
    """
    # PEP 8 (E712): test the boolean directly instead of `choice == False`.
    if not choice:
        return gr.update(visible=True), gr.update(visible=False)
    return gr.update(visible=False), gr.update(visible=True)
23
-
24
def ref_video_fn(path_of_ref_video):
    """Tick the 'Use Reference Video' checkbox exactly when a video was supplied."""
    has_video = path_of_ref_video is not None
    return gr.update(value=has_video)
29
-
30
def download_model():
    """Download the SadTalker V002rc checkpoints from the Hugging Face Hub.

    Snapshots the whole repo into ``./checkpoints`` so the pipeline can load
    its weights locally.
    """
    repo_id = 'vinthony/SadTalker-V002rc'
    # NOTE(review): `local_dir_use_symlinks` is deprecated in recent
    # huggingface_hub releases — confirm the pinned version still accepts it.
    snapshot_download(repo_id=repo_id, local_dir='./checkpoints', local_dir_use_symlinks=True)
33
-
34
def sadtalker_demo():
    """Build and return the SadTalker Gradio Blocks interface.

    Downloads the model checkpoints, instantiates the lazily-loaded SadTalker
    pipeline, and wires up the source-image/driving-audio inputs, the settings
    panel, the Generate button and the example gallery.

    Returns:
        The assembled ``gr.Blocks`` interface, ready to be queued/launched.
    """

    download_model()

    sad_talker = SadTalker(lazy_load=True)
    # tts_talker = TTSTalker()

    with gr.Blocks(analytics_enabled=False) as sadtalker_interface:
        # ---- page header: title + paper/project links ----
        gr.Markdown("<div align='center'> <h2> 😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023) </span> </h2> \
                    <a style='font-size:18px;color: #efefef' href='https://arxiv.org/abs/2211.12194'>Arxiv</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
                    <a style='font-size:18px;color: #efefef' href='https://sadtalker.github.io'>Homepage</a> &nbsp;&nbsp;&nbsp;&nbsp;&nbsp; \
                    <a style='font-size:18px;color: #efefef' href='https://github.com/Winfredy/SadTalker'> Github </div>")

        gr.Markdown("""
        <b>You may duplicate the space and upgrade to GPU in settings for better performance and faster inference without waiting in the queue. <a style='display:inline-block' href="https://huggingface.co/spaces/vinthony/SadTalker?duplicate=true"><img src="https://bit.ly/3gLdBN6" alt="Duplicate Space"></a></b> \
        <br/><b>Alternatively, try our GitHub <a href=https://github.com/Winfredy/SadTalker> code </a> on your own GPU. </b> <a style='display:inline-block' href="https://github.com/Winfredy/SadTalker"><img src="https://img.shields.io/github/stars/Winfredy/SadTalker?style=social"/></a> \
        """)

        # NOTE(review): .style(...) is the Gradio 3.x API; removed in Gradio 4.
        with gr.Row().style(equal_height=False):
            # ---- left column: source image + driving inputs ----
            with gr.Column(variant='panel'):
                with gr.Tabs(elem_id="sadtalker_source_image"):
                    with gr.TabItem('Source image'):
                        with gr.Row():
                            source_image = gr.Image(label="Source image", source="upload", type="filepath", elem_id="img2img_image").style(width=512)

                with gr.Tabs(elem_id="sadtalker_driven_audio"):
                    with gr.TabItem('Driving Methods'):
                        gr.Markdown("Possible driving combinations: <br> 1. Audio only 2. Audio/IDLE Mode + Ref Video(pose, blink, pose+blink) 3. IDLE Mode only 4. Ref Video only (all) ")

                        with gr.Row():
                            driven_audio = gr.Audio(label="Input audio", source="upload", type="filepath")
                            driven_audio_no = gr.Audio(label="Use IDLE mode, no audio is required", source="upload", type="filepath", visible=False)

                            with gr.Column():
                                use_idle_mode = gr.Checkbox(label="Use Idle Animation")
                                length_of_audio = gr.Number(value=5, label="The length(seconds) of the generated video.")
                                # Toggling idle mode swaps which audio widget is visible.
                                use_idle_mode.change(toggle_audio_file, inputs=use_idle_mode, outputs=[driven_audio, driven_audio_no]) # todo

                        with gr.Row():
                            ref_video = gr.Video(label="Reference Video", source="upload", type="filepath", elem_id="vidref").style(width=512)

                            with gr.Column():
                                use_ref_video = gr.Checkbox(label="Use Reference Video")
                                ref_info = gr.Radio(['pose', 'blink','pose+blink', 'all'], value='pose', label='Reference Video',info="How to borrow from reference Video?((fully transfer, aka, video driving mode))")

                            # Uploading a reference video auto-ticks the checkbox.
                            ref_video.change(ref_video_fn, inputs=ref_video, outputs=[use_ref_video]) # todo

            # ---- right column: generation settings + output ----
            with gr.Column(variant='panel'):
                with gr.Tabs(elem_id="sadtalker_checkbox"):
                    with gr.TabItem('Settings'):
                        gr.Markdown("need help? please visit our [[best practice page](https://github.com/OpenTalker/SadTalker/blob/main/docs/best_practice.md)] for more details")
                        with gr.Column(variant='panel'):
                            # width = gr.Slider(minimum=64, elem_id="img2img_width", maximum=2048, step=8, label="Manually Crop Width", value=512) # img2img_width
                            # height = gr.Slider(minimum=64, elem_id="img2img_height", maximum=2048, step=8, label="Manually Crop Height", value=512) # img2img_width
                            with gr.Row():
                                pose_style = gr.Slider(minimum=0, maximum=45, step=1, label="Pose style", value=0) #
                                exp_weight = gr.Slider(minimum=0, maximum=3, step=0.1, label="expression scale", value=1) #
                                blink_every = gr.Checkbox(label="use eye blink", value=True)

                            with gr.Row():
                                size_of_image = gr.Radio([256, 512], value=256, label='face model resolution', info="use 256/512 model?") #
                                preprocess_type = gr.Radio(['crop', 'resize','full', 'extcrop', 'extfull'], value='crop', label='preprocess', info="How to handle input image?")

                            with gr.Row():
                                is_still_mode = gr.Checkbox(label="Still Mode (fewer head motion, works with preprocess `full`)")
                                facerender = gr.Radio(['facevid2vid','pirender'], value='facevid2vid', label='facerender', info="which face render?")

                            with gr.Row():
                                batch_size = gr.Slider(label="batch size in generation", step=1, maximum=10, value=1)
                                enhancer = gr.Checkbox(label="GFPGAN as Face enhancer")

                            submit = gr.Button('Generate', elem_id="sadtalker_generate", variant='primary')

                # elem_id kept verbatim ("genearted"): external CSS/JS may target it.
                with gr.Tabs(elem_id="sadtalker_genearted"):
                    gen_video = gr.Video(label="Generated video", format="mp4").style(width=256)

            # Input order must match the signature of SadTalker.test.
            submit.click(
                fn=sad_talker.test,
                inputs=[source_image,
                        driven_audio,
                        preprocess_type,
                        is_still_mode,
                        enhancer,
                        batch_size,
                        size_of_image,
                        pose_style,
                        facerender,
                        exp_weight,
                        use_ref_video,
                        ref_video,
                        ref_info,
                        use_idle_mode,
                        length_of_audio,
                        blink_every
                        ],
                outputs=[gen_video]
                )

        with gr.Row():
            # Each row: (source image, driving audio, preprocess, still mode, enhancer).
            examples = [
                ['examples/source_image/full_body_1.png', 'examples/driven_audio/bus_chinese.wav', 'crop', True, False],
                ['examples/source_image/full_body_2.png', 'examples/driven_audio/japanese.wav', 'crop', False, False],
                ['examples/source_image/full3.png', 'examples/driven_audio/deyu.wav', 'crop', False, True],
                ['examples/source_image/full4.jpeg', 'examples/driven_audio/eluosi.wav', 'full', False, True],
                ['examples/source_image/full4.jpeg', 'examples/driven_audio/imagine.wav', 'full', True, True],
                ['examples/source_image/full_body_1.png', 'examples/driven_audio/bus_chinese.wav', 'full', True, False],
                ['examples/source_image/art_13.png', 'examples/driven_audio/fayu.wav', 'resize', True, False],
                ['examples/source_image/art_5.png', 'examples/driven_audio/chinese_news.wav', 'resize', False, False],
                ['examples/source_image/art_5.png', 'examples/driven_audio/RD_Radio31_000.wav', 'resize', True, True],
            ]
            gr.Examples(examples=examples,
                        inputs=[
                            source_image,
                            driven_audio,
                            preprocess_type,
                            is_still_mode,
                            enhancer],
                        outputs=[gen_video],
                        fn=sad_talker.test,
                        # Pre-render the example outputs only on HF Spaces.
                        cache_examples=os.getenv('SYSTEM') == 'spaces') #

    return sadtalker_interface
215
-
216
-
217
if __name__ == "__main__":
    # Build the UI, enable request queueing, and serve it with debug logging.
    interface = sadtalker_demo()
    interface.queue(max_size=10)
    interface.launch(debug=True)
222
-
223
-