"""Overlay SMPL-X body meshes onto the RGB frames of a SynBody sequence.

Example:

>>> python visualize_2d.py \
        --seq_dir synbody_v1_0/20230113/Downtown/LS_0114_004551_088_CAM002 \
        --body_model_path {path_to_body_model} \
        --save_path vis/LS_0114_004551_088_CAM002.mp4
"""

from pathlib import Path

import cv2
import numpy as np
import pyrender
import smplx
import torch
import tqdm
import trimesh
from pyrender.viewer import DirectionalLight, Node


# SMPL-X model configuration matching the SynBody annotations
num_betas = 10
num_pca_comps = 45
flat_hand_mean = False

# frame size and pinhole intrinsics (fx = fy = 640 corresponds to a 90 degree horizontal FOV)
w = 1280
h = 720
fx = fy = max(w, h) / 2


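# Expected layout of --seq_dir (inferred from the glob patterns below):
#   rgb/*.jpeg    rendered RGB frames, one per time step
#   smplx/*.npz   per-person SMPL-X annotations; each file holds an 'smplx' dict
#                 with per-frame 'global_orient', 'body_pose', 'transl', 'betas',
#                 'left_hand_pose' and 'right_hand_pose' arrays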
def load_data(seq_dir):
    seq_dir = Path(seq_dir)

    frame_paths = sorted(seq_dir.glob('rgb/*.jpeg'))
    images = [cv2.imread(str(p)) for p in frame_paths]

    person_paths = sorted(seq_dir.glob('smplx/*.npz'))
    persons = {}
    for p in person_paths:
        person_id = p.stem
        person = dict(np.load(p, allow_pickle=True))
        # unwrap 0-dim object arrays (pickled dicts) into plain Python objects
        for annot in person.keys():
            if isinstance(person[annot], np.ndarray) and person[annot].ndim == 0:
                person[annot] = person[annot].item()
        persons[person_id] = person

    return images, persons


def compute_camera_pose(camera_pose):
    # pyrender uses the OpenGL camera convention (x right, y up, z towards the
    # viewer), while the annotations follow the computer-vision convention
    # (x right, y down, z forward); flipping the y- and z-axes converts between them
    R_convention = np.array([
        [1.0, 0.0, 0.0, 0.0],
        [0.0, -1.0, 0.0, 0.0],
        [0.0, 0.0, -1.0, 0.0],
        [0.0, 0.0, 0.0, 1.0],
    ])
    camera_pose = R_convention @ camera_pose

    return camera_pose


def create_raymond_lights():
    # a single directional light, oriented like the convention-flipped camera
    matrix = np.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
    return [Node(light=DirectionalLight(color=np.ones(3), intensity=2.0), matrix=matrix)]


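# Render the meshes from the given camera and alpha-composite the result onto
# the frame: pixels covered by the rendering replace the original image.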
def draw_overlay(img, camera, camera_pose, meshes):
    scene = pyrender.Scene(bg_color=[0.0, 0.0, 0.0, 0.0], ambient_light=(0.3, 0.3, 0.3))

    for i, mesh in enumerate(meshes):
        scene.add(mesh, f'mesh_{i}')

    scene.add(camera, pose=camera_pose)

    light_nodes = create_raymond_lights()
    for node in light_nodes:
        scene.add_node(node)

    r = pyrender.OffscreenRenderer(viewport_width=w, viewport_height=h, point_size=1)
    color, _ = r.render(scene, flags=pyrender.RenderFlags.RGBA)
    r.delete()  # release the GL context; a new renderer is created per frame
    color = color.astype(np.float32) / 255.0
    # pyrender renders RGBA while the cv2 frame is BGR, so reorder the channels
    color = color[:, :, [2, 1, 0, 3]]

    # composite: keep the rendering wherever its alpha channel is non-zero
    valid_mask = (color[:, :, -1] > 0)[:, :, np.newaxis]
    img = img / 255
    output_img = color[:, :, :3] * valid_mask + (1 - valid_mask) * img
    img = (output_img * 255).astype(np.uint8)

    return img


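# Optional helper: draws a labelled box per person given a mapping
# person_id -> (x, y, width, height). It is not called by visualize_2d below,
# but can be applied to frames when bounding-box annotations are available.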
def draw_bboxes(img, bboxes):
    for person_id, bbox in bboxes.items():
        # avoid shadowing the module-level frame size w, h
        x, y, bw, bh = (int(v) for v in bbox)
        img = cv2.rectangle(img, (x, y), (x + bw, y + bh), (0, 0, 255), 2)
        img = cv2.putText(img, person_id, (x, y - 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 0, 255), 2)

    return img


def visualize_2d(seq_dir, body_model_path, save_path):
    device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

    body_model = smplx.create(
        body_model_path,
        model_type='smplx',
        flat_hand_mean=flat_hand_mean,
        use_face_contour=True,
        use_pca=True,
        num_betas=num_betas,
        num_pca_comps=num_pca_comps,
    ).to(device)

    camera = pyrender.camera.IntrinsicsCamera(fx=fx, fy=fy, cx=w / 2, cy=h / 2)
    camera_pose = compute_camera_pose(np.eye(4))
    material = pyrender.MetallicRoughnessMaterial(
        metallicFactor=0.0, alphaMode='OPAQUE', baseColorFactor=(1.0, 1.0, 0.9, 1.0)
    )
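
    # Load the frames and per-person annotations, then pose and render every
    # person at each frame.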
    images, persons = load_data(seq_dir)

    save_images = []
    for frame_idx, image in enumerate(tqdm.tqdm(images)):
        meshes = []
        for person in persons.values():
            person = person['smplx']
            # select this frame's parameters; [[frame_idx]] keeps a batch dim of 1
            model_output = body_model(
                global_orient=torch.tensor(person['global_orient'][[frame_idx]], device=device),
                body_pose=torch.tensor(person['body_pose'][[frame_idx]], device=device),
                transl=torch.tensor(person['transl'][[frame_idx]], device=device),
                betas=torch.tensor(person['betas'][[frame_idx]], device=device),
                left_hand_pose=torch.tensor(person['left_hand_pose'][[frame_idx]], device=device),
                right_hand_pose=torch.tensor(person['right_hand_pose'][[frame_idx]], device=device),
                return_verts=True,
            )
            vertices = model_output.vertices.detach().cpu().numpy().squeeze()
            faces = body_model.faces

            out_mesh = trimesh.Trimesh(vertices, faces, process=False)
            mesh = pyrender.Mesh.from_trimesh(out_mesh, material=material)
            meshes.append(mesh)

        image = draw_overlay(image, camera, camera_pose, meshes)

        save_images.append(image)
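
    # Encode the composited frames into an mp4 video at 15 fps.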
    Path(save_path).parent.mkdir(parents=True, exist_ok=True)
    fourcc = cv2.VideoWriter_fourcc(*'mp4v')
    video = cv2.VideoWriter(save_path, fourcc, fps=15, frameSize=(w, h))
    for image in save_images:
        video.write(image)
    video.release()

    print(f'Visualization video saved at {save_path}')


if __name__ == '__main__':
    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--seq_dir', type=str, required=True, help='directory containing the sequence data.')
    parser.add_argument(
        '--body_model_path', type=str, required=True, help='directory in which SMPL-X body models are stored.'
    )
    parser.add_argument('--save_path', type=str, required=True, help='path to save the visualization video.')
    args = parser.parse_args()

    visualize_2d(args.seq_dir, args.body_model_path, args.save_path)