fixed resize mode
- app.py +2 -2
- modules/sadtalker_test.py +1 -3
app.py
CHANGED
@@ -22,10 +22,10 @@ def sadtalker_demo(result_dir='./tmp/'):
 
     sad_talker = SadTalker()
    with gr.Blocks(analytics_enabled=False) as sadtalker_interface:
-        gr.Markdown("<div align='center'> <
+        gr.Markdown("<div align='center'> <h3> 😭 SadTalker: Learning Realistic 3D Motion Coefficients for Stylized Audio-Driven Single Image Talking Face Animation (CVPR 2023) </h3> \
                    <a style='font-size:18px;color: #efefef' href='https://arxiv.org/abs/2211.12194'>Arxiv</a> \
                    <a style='font-size:18px;color: #efefef' href='https://sadtalker.github.io'>Homepage</a> \
-                    <a style='font-size:18px;color: #efefef' href='https://github.com/Winfredy/SadTalker'> Github </div>")
+                    <a style='font-size:18px;color: #efefef' href='https://github.com/Winfredy/SadTalker'> Github </a> </div>")
 
    with gr.Row():
        with gr.Column(variant='panel'):
|
modules/sadtalker_test.py
CHANGED
@@ -1,7 +1,5 @@
 import torch
-from time import gmtime, strftime
 import os, sys, shutil
-from argparse import ArgumentParser
 from src.utils.preprocess import CropAndExtract
 from src.test_audio2coeff import Audio2Coeff
 from src.facerender.animate import AnimateFromCoeff
@@ -91,7 +89,7 @@ class SadTalker():
         #crop image and extract 3dmm from image
         first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
         os.makedirs(first_frame_dir, exist_ok=True)
-        first_coeff_path, crop_pic_path, original_size = self.preprocess_model.generate(pic_path, first_frame_dir, crop_or_resize= 'crop' if resize_mode
+        first_coeff_path, crop_pic_path, original_size = self.preprocess_model.generate(pic_path, first_frame_dir, crop_or_resize= 'crop' if resize_mode else 'resize')
         if first_coeff_path is None:
             raise AttributeError("No face is detected")
 
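
Note (not part of the commit): a minimal sketch of what the repaired conditional does, assuming resize_mode is a boolean flag passed in from the Gradio UI. It maps that flag onto the string CropAndExtract.generate receives as its crop_or_resize argument; the helper name below is illustrative only.

# Illustrative sketch only; pick_preprocess_mode is a hypothetical helper.
def pick_preprocess_mode(resize_mode: bool) -> str:
    # True  -> 'crop'   (crop the face region before extracting 3DMM coefficients)
    # False -> 'resize' (resize the whole frame instead of cropping)
    return 'crop' if resize_mode else 'resize'

assert pick_preprocess_mode(True) == 'crop'
assert pick_preprocess_mode(False) == 'resize'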