import os
import pickle
import time
from argparse import ArgumentParser

import numpy as np
import torch
import smplx
import trimesh
import cv2

# pyrender must see the EGL platform flag before it is imported,
# so the script can render headlessly (no display attached).
os.environ['PYOPENGL_PLATFORM'] = 'egl'
import pyrender

# Repository root, assuming this script lives two directory levels below it.
root_dir = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), '../..'))


def get_smplx_model(bs, device):
    # Build an SMPL-X body model whose batch size equals the number of frames,
    # so the whole motion sequence is evaluated in a single forward pass.
    smpl_model = smplx.create(os.path.join(root_dir, 'deps/smplx/models'),
                              model_type='smplx',
                              gender='male',
                              ext='npz',
                              batch_size=bs,
                              ).to(device)
    return smpl_model.eval()


def render_motion_to_video(motion, device, output_path, title, step=2):
    # Run SMPL-X over the whole sequence to get per-frame mesh vertices.
    # `motion` is a dict of numpy arrays keyed 'transl', 'global_orient', and 'body_pose'.
    length = motion['transl'].shape[0]
    model = get_smplx_model(length, device)
    vertices = model(
        body_pose=torch.tensor(motion['body_pose'], dtype=torch.float32).to(device),
        transl=torch.tensor(motion['transl'], dtype=torch.float32).to(device),
        global_orient=torch.tensor(motion['global_orient'], dtype=torch.float32).to(device)
    ).vertices.detach().cpu().numpy()
    faces = model.faces
    
    temp_dir = "temp_render_images"
    os.makedirs(temp_dir, exist_ok=True)
    
    width, height = 480, 320
    images = []
    for i in range(0, length, step):
        trimesh_mesh = trimesh.Trimesh(vertices=vertices[i], faces=faces)
        mesh = pyrender.Mesh.from_trimesh(trimesh_mesh)
        
        scene = pyrender.Scene(ambient_light=[0.5, 0.5, 0.5])
        scene.add(mesh)
        
        camera = pyrender.PerspectiveCamera(yfov=np.pi / 3.0)
        angle = -np.pi / 8
        camera_pose = np.array([
            [1, 0, 0, 0],
            [0, np.cos(angle), -np.sin(angle), 2.0],
            [0, np.sin(angle), np.cos(angle), 4.0],
            [0, 0, 0, 1]
        ])
        scene.add(camera, pose=camera_pose)
        light = pyrender.DirectionalLight(color=[1.0, 1.0, 1.0], intensity=2.0)
        scene.add(light, pose=camera_pose)
        
        r = pyrender.OffscreenRenderer(width, height)
        color, _ = r.render(scene)
        r.delete()
        
        image = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)
        
        img_path = os.path.join(temp_dir, f"frame_{i:04d}.png")
        cv2.imwrite(img_path, image)
        images.append(image)
    
    if images:
        height, width, _ = images[0].shape
        # The source motion is assumed to play at 20 fps, so after keeping every
        # `step`-th frame the raw video runs at 20 / step fps.
        sampled_fps = int(20 / step)
        temp_video_path = output_path.replace('.mp4', '_temp.mp4')

        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        video = cv2.VideoWriter(temp_video_path, fourcc, sampled_fps, (width, height))
        for image in images:
            video.write(image)
        video.release()

        try:
            # Use ffmpeg motion interpolation (minterpolate) to upsample the
            # low-fps render to a smooth 24 fps video, then re-encode with x264.
            import subprocess
            cmd = [
                'ffmpeg',
                '-y',
                '-i', temp_video_path,
                '-filter:v', 'minterpolate=fps=24:mi_mode=mci:mc_mode=aobmc:me_mode=bidir:vsbmc=1',
                '-c:v', 'libx264',
                '-preset', 'medium',
                '-crf', '18',
                output_path
            ]

            subprocess.run(cmd, check=True)
            print(f"Frame interpolation finished; final video saved to: {output_path}")
            os.remove(temp_video_path)
        except Exception as e:
            # Fall back to the un-interpolated video if ffmpeg is unavailable or fails.
            print(f"Frame interpolation failed: {e}")
            os.rename(temp_video_path, output_path)
            print(f"Using the original video: {output_path}")
    else:
        print("No frames were rendered, so no video was created")
    
    # Clean up the temporary frame images.
    for file in os.listdir(temp_dir):
        os.remove(os.path.join(temp_dir, file))
    os.rmdir(temp_dir)

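# Quick programmatic smoke test (a sketch, not part of the pipeline): the array names
# and shapes below are assumptions based on how render_motion_to_video reads the dict,
# with body_pose taken as (T, 63) SMPL-X axis-angle parameters.
#
#   motion = {
#       'transl': np.zeros((4, 3), dtype=np.float32),
#       'global_orient': np.zeros((4, 3), dtype=np.float32),
#       'body_pose': np.zeros((4, 63), dtype=np.float32),
#   }
#   render_motion_to_video(motion, torch.device('cpu'), 'tpose.mp4', 'tpose', step=1)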

if __name__ == "__main__":
    print('******Start rendering...******')
    start_time = time.time()

    parser = ArgumentParser()
    parser.add_argument("--motion_path", type=str, required=True)
    parser.add_argument("--title", type=str, default="rendered_motion.mp4")
    args = parser.parse_args()

    # Load the pickled motion dictionary and write the video next to the input .pkl.
    with open(args.motion_path, 'rb') as f:
        motion = pickle.load(f)
    output_path = args.motion_path.replace(".pkl", ".mp4")
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    print(args.title)
    render_motion_to_video(motion, device, output_path, args.title)

    print(f"******Rendering finished in {time.time() - start_time:.2f} seconds.******")