Update utils/other_tools_hf.py

utils/other_tools_hf.py    CHANGED    (+97 -0)
@@ -600,6 +600,103 @@ def generate_images(frames, vertices_all, vertices1_all, faces, output_dir, file
 # ]
 # )
 
+def render_one_sequence_with_face(
+        res_npz_path,
+        gt_npz_path,
+        output_dir,
+        audio_path,
+        model_folder="/data/datasets/smplx_models/",
+        model_type='smplx',
+        gender='NEUTRAL_2020',
+        ext='npz',
+        num_betas=300,
+        num_expression_coeffs=100,
+        use_face_contour=False,
+        use_matplotlib=False,
+        args=None):
+    import smplx
+    import matplotlib.pyplot as plt
+    import imageio
+    from tqdm import tqdm
+    import os
+    import numpy as np
+    import torch
+    import moviepy.editor as mp
+    import librosa
+
+    model = smplx.create(model_folder, model_type=model_type,
+                         gender=gender, use_face_contour=use_face_contour,
+                         num_betas=num_betas,
+                         num_expression_coeffs=num_expression_coeffs,
+                         ext=ext, use_pca=False).cuda()
+
+    #data_npz = np.load(f"{output_dir}{res_npz_path}.npz")
+    data_np_body = np.load(res_npz_path, allow_pickle=True)
+    gt_np_body = np.load(gt_npz_path, allow_pickle=True)
+
+    if not os.path.exists(output_dir): os.makedirs(output_dir)
+    # if not use_matplotlib:
+    #    import trimesh
+    #import pyrender
+    from pyvirtualdisplay import Display
+    #'''
+    #display = Display(visible=0, size=(1000, 1000))
+    #display.start()
+    faces = np.load(f"{model_folder}/smplx/SMPLX_NEUTRAL_2020.npz", allow_pickle=True)["f"]
+    seconds = 1
+    #data_npz["jaw_pose"].shape[0]
+    n = data_np_body["poses"].shape[0]
+    beta = torch.from_numpy(data_np_body["betas"]).to(torch.float32).unsqueeze(0).cuda()
+    beta = beta.repeat(n, 1)
+    expression = torch.from_numpy(data_np_body["expressions"][:n]).to(torch.float32).cuda()
+    jaw_pose = torch.from_numpy(data_np_body["poses"][:n, 66:69]).to(torch.float32).cuda()
+    pose = torch.from_numpy(data_np_body["poses"][:n]).to(torch.float32).cuda()
+    transl = torch.from_numpy(data_np_body["trans"][:n]).to(torch.float32).cuda()
+    # print(beta.shape, expression.shape, jaw_pose.shape, pose.shape, transl.shape, pose[:,:3].shape)
+    output = model(betas=beta, transl=transl, expression=expression, jaw_pose=jaw_pose,
+        global_orient=pose[:,:3], body_pose=pose[:,3:21*3+3], left_hand_pose=pose[:,25*3:40*3], right_hand_pose=pose[:,40*3:55*3],
+        leye_pose=pose[:, 69:72],
+        reye_pose=pose[:, 72:75],
+        return_verts=True)
+    vertices_all = output["vertices"].cpu().detach().numpy()
+
+    beta1 = torch.from_numpy(data_np_body["betas"]).to(torch.float32).unsqueeze(0).cuda()
+    beta1 = beta1.repeat(n, 1)
+    expression1 = torch.from_numpy(data_np_body["expressions"][:n]).to(torch.float32).cuda()
+    zero_pose = np.zeros_like(data_np_body["poses"])
+    jaw_pose1 = torch.from_numpy(zero_pose[:n, 66:69]).to(torch.float32).cuda()
+    pose1 = torch.from_numpy(zero_pose[:n]).to(torch.float32).cuda()
+    zero_trans = np.zeros_like(data_np_body["trans"])
+    transl1 = torch.from_numpy(zero_trans[:n]).to(torch.float32).cuda()
+    output1 = model(betas=beta1, transl=transl1, expression=expression1, jaw_pose=jaw_pose1,
+        global_orient=pose1[:,:3], body_pose=pose1[:,3:21*3+3], left_hand_pose=pose1[:,25*3:40*3], right_hand_pose=pose1[:,40*3:55*3],
+        leye_pose=pose1[:, 69:72],
+        reye_pose=pose1[:, 72:75],
+        return_verts=True)
+    vertices1_all = output1["vertices"].cpu().detach().numpy()*8
+    trans_down = np.zeros_like(vertices1_all)
+    trans_down[:, :, 1] = 1.55
+    vertices1_all = vertices1_all - trans_down
+    if args.debug:
+        seconds = 1
+    else:
+        seconds = vertices_all.shape[0]//30
+    silent_video_file_path = utils.fast_render.generate_silent_videos(args.render_video_fps,
+                                                                      args.render_video_width,
+                                                                      args.render_video_height,
+                                                                      args.render_concurrent_num,
+                                                                      args.render_tmp_img_filetype,
+                                                                      int(seconds*args.render_video_fps),
+                                                                      vertices1_all,
+                                                                      vertices_all,
+                                                                      faces,
+                                                                      output_dir)
+    base_filename_without_ext = os.path.splitext(os.path.basename(res_npz_path))[0]
+    final_clip = os.path.join(output_dir, f"{base_filename_without_ext}.mp4")
+    utils.media.add_audio_to_video(silent_video_file_path, audio_path, final_clip)
+    os.remove(silent_video_file_path)
+    return final_clip
+
 def render_one_sequence(
     res_npz_path,
     gt_npz_path,
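
For reference, a minimal driver sketch for the new entry point. Everything below is illustrative rather than taken from this commit: the npz/audio paths and the args values are hypothetical stand-ins, and the call assumes a CUDA device, the SMPL-X model files under model_folder, and that utils.fast_render and utils.media are importable from the module, as the function body expects.

# Hypothetical usage sketch -- all paths and args values are placeholders.
from types import SimpleNamespace
from utils.other_tools_hf import render_one_sequence_with_face

args = SimpleNamespace(
    debug=False,                     # True caps the render at 1 second
    render_video_fps=30,             # the function assumes 30 fps when computing seconds
    render_video_width=1920,
    render_video_height=720,
    render_concurrent_num=2,
    render_tmp_img_filetype="bmp",
)

final_clip = render_one_sequence_with_face(
    "outputs/res_demo.npz",          # predicted motion: poses / expressions / trans / betas
    "outputs/gt_demo.npz",           # ground truth is loaded but not rendered here
    "outputs/render/",
    "outputs/demo.wav",
    model_folder="./smplx_models/",  # must contain smplx/SMPLX_NEUTRAL_2020.npz
    args=args,
)
print(final_clip)                    # e.g. outputs/render/res_demo.mp4, audio muxed in

On the slicing in the diff: with 55 SMPL-X joints in axis-angle form, columns 0:3 of "poses" are global_orient, 3:66 the 21 body joints, 66:69 the jaw, 69:72 and 72:75 the eyes, and 75:165 the two hands, which is why the code indexes pose[:, 66:69] for jaw_pose and pose[:, 25*3:40*3] / pose[:, 40*3:55*3] for the hands. The second, zero-pose forward pass keeps only the facial expression; its vertices are scaled by 8 and shifted down 1.55 on y, presumably so the enlarged face lands in frame next to the full-body render.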