import torch
from time import strftime
import os, sys, time
from argparse import ArgumentParser

from src.utils.preprocess import CropAndExtract
from src.test_audio2coeff import Audio2Coeff
from src.facerender.animate import AnimateFromCoeff
from src.generate_batch import get_data
from src.generate_facerender_batch import get_facerender_data
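# End-to-end inference: crop the source image and extract its 3DMM
# coefficients, map the driving audio to per-frame motion coefficients, then
# render the talking-head video from those coefficients.
#
# Example invocation (a sketch using the argparse defaults below; substitute
# this file's actual name for inference.py):
#
#   python inference.py \
#       --driven_audio ./examples/driven_audio/japanese.wav \
#       --source_image ./examples/source_image/art_0.png \
#       --result_dir ./results --enhancer GFPGAN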
def main(args):
    pic_path = args.source_image
    audio_path = args.driven_audio
    save_dir = os.path.join(args.result_dir, strftime("%Y_%m_%d_%H.%M.%S"))
    os.makedirs(save_dir, exist_ok=True)
    pose_style = args.pose_style
    device = args.device
    batch_size = args.batch_size
    camera_yaw_list = args.camera_yaw
    camera_pitch_list = args.camera_pitch
    camera_roll_list = args.camera_roll
    current_code_path = sys.argv[0]
    current_root_path = os.path.split(current_code_path)[0]

    os.environ['TORCH_HOME'] = os.path.join(current_root_path, args.checkpoint_dir)
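    # Checkpoint and config paths. The 'auido' spelling below looks like a
    # typo, but it is assumed to match the shipped checkpoint/config file
    # names, so it is kept as-is.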
    path_of_lm_croper = os.path.join(current_root_path, args.checkpoint_dir, 'shape_predictor_68_face_landmarks.dat')
    path_of_net_recon_model = os.path.join(current_root_path, args.checkpoint_dir, 'epoch_20.pth')
    dir_of_BFM_fitting = os.path.join(current_root_path, args.checkpoint_dir, 'BFM_Fitting')
    wav2lip_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'wav2lip.pth')

    audio2pose_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'auido2pose_00140-model.pth')
    audio2pose_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2pose.yaml')

    audio2exp_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'auido2exp_00300-model.pth')
    audio2exp_yaml_path = os.path.join(current_root_path, 'src', 'config', 'auido2exp.yaml')

    free_view_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'facevid2vid_00189-model.pth.tar')
    mapping_checkpoint = os.path.join(current_root_path, args.checkpoint_dir, 'mapping_00229-model.pth.tar')
    facerender_yaml_path = os.path.join(current_root_path, 'src', 'config', 'facerender.yaml')
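    # Build the three pipeline stages: image preprocessing / 3DMM extraction,
    # audio-to-coefficient mapping, and coefficient-to-video rendering.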
    print(path_of_net_recon_model)
    preprocess_model = CropAndExtract(path_of_lm_croper, path_of_net_recon_model, dir_of_BFM_fitting, device)

    print(audio2pose_checkpoint)
    print(audio2exp_checkpoint)
    audio_to_coeff = Audio2Coeff(audio2pose_checkpoint, audio2pose_yaml_path,
                                 audio2exp_checkpoint, audio2exp_yaml_path,
                                 wav2lip_checkpoint, device)

    print(free_view_checkpoint)
    print(mapping_checkpoint)
    animate_from_coeff = AnimateFromCoeff(free_view_checkpoint, mapping_checkpoint,
                                          facerender_yaml_path, device)
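    # 1. Crop the source image and extract the 3DMM coefficients of the first frame.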
    first_frame_dir = os.path.join(save_dir, 'first_frame_dir')
    os.makedirs(first_frame_dir, exist_ok=True)
    first_coeff_path, crop_pic_path = preprocess_model.generate(pic_path, first_frame_dir)
    if first_coeff_path is None:
        print("Can't get the coeffs of the input")
        return
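    # 2. Map the driving audio (plus the reference coefficients) to per-frame
    # motion coefficients.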
    batch = get_data(first_coeff_path, audio_path, device)
    coeff_path = audio_to_coeff.generate(batch, save_dir, pose_style)
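    # Optional: render a composed 3D face / landmark visualization of the
    # predicted coefficients.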
    if args.face3dvis:
        from src.face3d.visualize import gen_composed_video
        gen_composed_video(args, device, first_coeff_path, coeff_path, audio_path, os.path.join(save_dir, '3dface.mp4'))
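    # 3. Render the final video from the predicted coefficients.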
    data = get_facerender_data(coeff_path, crop_pic_path, first_coeff_path, audio_path,
                               batch_size, camera_yaw_list, camera_pitch_list, camera_roll_list,
                               expression_scale=args.expression_scale, still_mode=args.still)

    animate_from_coeff.generate(data, save_dir, enhancer=args.enhancer)
    video_name = data['video_name']

    if args.enhancer is not None:
        video_name = video_name + '_enhanced'
    print(f'The generated video is named {video_name} in {save_dir}')

    return os.path.join(save_dir, video_name + '.mp4')


if __name__ == '__main__':

    parser = ArgumentParser()
    parser.add_argument("--driven_audio", default='./examples/driven_audio/japanese.wav', help="path to the driving audio")
    parser.add_argument("--source_image", default='./examples/source_image/art_0.png', help="path to the source image")
    parser.add_argument("--checkpoint_dir", default='./checkpoints', help="path to the model checkpoints")
    parser.add_argument("--result_dir", default='./results', help="path to the output directory")
    parser.add_argument("--pose_style", type=int, default=0, help="input pose style from [0, 46)")
    parser.add_argument("--batch_size", type=int, default=2, help="batch size of facerender")
    parser.add_argument("--expression_scale", type=float, default=1., help="scale factor for the expression coefficients")
    parser.add_argument('--camera_yaw', nargs='+', type=int, default=[0], help="camera yaw degree(s)")
    parser.add_argument('--camera_pitch', nargs='+', type=int, default=[0], help="camera pitch degree(s)")
    parser.add_argument('--camera_roll', nargs='+', type=int, default=[0], help="camera roll degree(s)")
    parser.add_argument('--enhancer', type=str, default=None, help="face enhancer, [GFPGAN]")
    parser.add_argument("--cpu", action="store_true", help="run on CPU even if CUDA is available")
    parser.add_argument("--face3dvis", action="store_true", help="generate 3d face and 3d landmarks")
    parser.add_argument("--still", action="store_true", help="enable still mode for the face renderer")
    parser.add_argument('--net_recon', type=str, default='resnet50', choices=['resnet18', 'resnet34', 'resnet50'], help='backbone of the face reconstruction network (unused here)')
    parser.add_argument('--init_path', type=str, default=None, help='unused')
    parser.add_argument('--use_last_fc', action='store_true', help='zero initialize the last fc')
    parser.add_argument('--bfm_folder', type=str, default='./checkpoints/BFM_Fitting/', help='folder of the BFM fitting files')
    parser.add_argument('--bfm_model', type=str, default='BFM_model_front.mat', help='BFM model file name')
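    # Perspective camera parameters, presumably consumed by the 3D face
    # visualization path above.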
    parser.add_argument('--focal', type=float, default=1015.)
    parser.add_argument('--center', type=float, default=112.)
    parser.add_argument('--camera_d', type=float, default=10.)
    parser.add_argument('--z_near', type=float, default=5.)
    parser.add_argument('--z_far', type=float, default=15.)
    args = parser.parse_args()

    if torch.cuda.is_available() and not args.cpu:
        args.device = "cuda"
    else:
        args.device = "cpu"

    main(args)