import os
from os.path import join, isfile
import sys
sys.path.append("..")
import numpy as np
import pickle
import torch
from torch import nn
import pytorch3d
import pytorch3d.io as IO
import trimesh
from smplx import smplx
import cv2
import imageio
from utils.txt2intrinsic import txt2intrinsic
from utils.pyt3d_wrapper import Pyt3DWrapper
from utils.avi2depth import avi2depth
from utils.time_align import time_align
from utils.process_timestamps import txt_to_paried_frameids, paired_frameids_to_txt
from utils.contact import compute_contact
from utils.VTS_object import get_obj_info
from utils.visualization import save_mesh
from utils.load_smplx_params import load_multiperson_smplx_params
from utils.object_retargeting import obj_retargeting
from utils.contact import compute_contact_and_closest_point
from smplx.smplx.lbs import batch_rodrigues
from transforms3d.axangles import mat2axangle
import open3d as o3d
from optimization.bvh2smplx import Simple_SMPLX, create_SMPLX_model
from tqdm import tqdm
import torchvision.io as io
from moviepy.editor import VideoFileClip, clips_array
from utils.simplify_mesh import simplify_mesh
from tqdm import tqdm


def draw_head_orientation(img, head_pos, head_rot, camera_intrinsic, camera_extrinsic):
    """Overlay a head coordinate frame (x=red, y=green, z=blue) onto img.

    The frame origin plus three 0.2-unit axis tips are taken from the head's
    local space to world space (head_rot/head_pos), then to camera space
    (camera_extrinsic), projected with camera_intrinsic, and drawn as lines.
    Returns the same image with the lines drawn in place.
    """
    # Origin plus one tip per axis, 0.2 units long, in the head's local frame.
    axes_local = np.float32([
        [0.0, 0.0, 0.0],
        [0.2, 0.0, 0.0],
        [0.0, 0.2, 0.0],
        [0.0, 0.0, 0.2],
    ])
    # Local -> world.
    pts_world = axes_local @ head_rot.T + head_pos.reshape(3)
    # Homogeneous coordinates, then world -> camera.
    homo = np.concatenate((pts_world, np.ones((pts_world.shape[0], 1))), axis=-1)
    pts_cam = (homo @ camera_extrinsic.transpose(1, 0))[:, :3]
    # Perspective projection into integer pixel coordinates.
    proj = pts_cam @ camera_intrinsic.transpose(1, 0)
    uv = (proj[:, :2] / proj[:, 2:]).astype(np.int32)

    # One line per axis; colors are BGR: x=red, y=green, z=blue.
    origin_px = tuple(uv[0])
    for tip, color in zip(uv[1:], ((0, 0, 255), (0, 255, 0), (255, 0, 0))):
        cv2.line(img, origin_px, tuple(tip), color, 3)
    return img


def HOI_visualization(HOI_retargeting_save_dir, objpose_save_file, objmesh_save_file, obj_dataset_dir, data_dir, save_pth, start_frame, end_frame, specified=None, save_filename="compare_result.mp4", device="cuda:0"):
    """Render retargeted vs. original HOI sequences to comparison videos.

    Writes, under ``save_pth``:
      - ``ori_result.mp4``: the original sequence (view 1 only);
      - ``retarget_result_{0..3}.mp4``: the retargeted sequence from four
        fixed viewpoints;
      - ``save_filename``: the four retargeted views tiled into a 2x2 grid.

    Args:
        HOI_retargeting_save_dir: .npy of per-frame retargeted SMPL-X params,
            each frame a dict keyed by "person1"/"person2".
        objpose_save_file: .npy of retargeted object poses; either an
            (N, 4, 4) matrix array or per-frame dicts with "rotation"
            (axis-angle tensor) and "translation".
        objmesh_save_file: path to the retargeted object mesh.
        obj_dataset_dir: object dataset root used to locate the original mesh.
        data_dir: capture dir containing aligned_objposes.npy and SMPLX_fitting/.
        save_pth: output directory for all videos.
        start_frame, end_frame: frame range to render.
        specified: optional per-frame vertex index lists highlighted on the
            original object mesh.
        save_filename: filename of the tiled comparison video.
        device: torch device string for the SMPL-X model.
    """
    new_mesh = trimesh.load_mesh(objmesh_save_file)
    origin_obj_name, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
    origin_mesh = trimesh.load_mesh(obj_data_path)

    # Downsample dense meshes so per-frame rendering stays tractable.
    if origin_mesh.vertices.shape[0] > 10000:
        origin_mesh = simplify_mesh(origin_mesh)
        print("downsampled object mesh: vertices shape =", origin_mesh.vertices.shape)

    if new_mesh.vertices.shape[0] > 10000:
        new_mesh = simplify_mesh(new_mesh)
        print("downsampled object mesh: vertices shape =", new_mesh.vertices.shape)

    new_vert, new_face = new_mesh.vertices, new_mesh.faces
    retarget_obj_pose = np.load(objpose_save_file, allow_pickle=True)

    origin_vert, origin_face = origin_mesh.vertices, origin_mesh.faces
    origin_obj_pose = np.load(join(data_dir, "aligned_objposes.npy"), allow_pickle=True)
    retarget_human_pose = np.load(HOI_retargeting_save_dir, allow_pickle=True)

    smplx_model = smplx.create("/share/human_model/models", model_type="smplx", gender="neutral", use_face_contour=False, num_betas=10, num_expression_coeffs=10, ext="npz", use_pca=True, num_pca_comps=12, flat_hand_mean=True).to(device)
    intrinsic = np.array([[-300, 0, 320], [0, -300, 180], [0, 0, 1]])
    # Four fixed virtual cameras placed around the scene.
    pyt3d_wrapper_view1 = Pyt3DWrapper(image_size=(600, 450), use_fixed_cameras=False, eyes=[np.float32([0.0, 3.0, -3.0])], intrin=intrinsic, device="cuda:0")
    pyt3d_wrapper_view2 = Pyt3DWrapper(image_size=(600, 450), use_fixed_cameras=False, eyes=[np.float32([3.0, 3.0, -3.0])], intrin=intrinsic, device="cuda:0")
    pyt3d_wrapper_view3 = Pyt3DWrapper(image_size=(600, 450), use_fixed_cameras=False, eyes=[np.float32([-3.0, 3.0, 0.0])], intrin=intrinsic, device="cuda:0")
    pyt3d_wrapper_view4 = Pyt3DWrapper(image_size=(600, 450), use_fixed_cameras=False, eyes=[np.float32([0.01, 3.0, 0.01])], intrin=intrinsic, device="cuda:0")

    video_v1 = []
    video_v2 = []
    video_v3 = []
    video_v4 = []
    ori_video = []
    multiperson_SMPLX_params = load_multiperson_smplx_params(join(data_dir, "SMPLX_fitting"), start_frame=0, end_frame=end_frame, device=device)
    origin_p1_SMPLX_params = multiperson_SMPLX_params["person1"]
    origin_p2_SMPLX_params = multiperson_SMPLX_params["person2"]
    # SMPL-X parameter keys forwarded to the model for the original sequence.
    param_keys = ("betas", "expression", "body_pose", "transl", "global_orient", "left_hand_pose", "right_hand_pose")
    for i in range(0, end_frame - start_frame):
        # Object poses may be stored as 4x4 matrices or as axis-angle dicts.
        if retarget_obj_pose.shape[-2:] == (4, 4):
            re_rot_vec = retarget_obj_pose[i + start_frame, :3, :3].squeeze()
            re_trans_vec = retarget_obj_pose[i + start_frame, :3, 3].squeeze()
        else:
            re_rot_vec = batch_rodrigues(retarget_obj_pose[i + start_frame]["rotation"]).cpu().numpy().squeeze()
            re_trans_vec = retarget_obj_pose[i + start_frame]["translation"].cpu().numpy().squeeze()
        re_vert = new_vert @ re_rot_vec.T + re_trans_vec
        re_seq_mesh = trimesh.Trimesh(vertices=re_vert, faces=new_face)
        # NOTE(review): human poses are indexed with i while object poses use
        # i + start_frame -- confirm retarget_human_pose already starts at
        # start_frame.
        re_p1 = retarget_human_pose[i]["person1"]
        re_p2 = retarget_human_pose[i]["person2"]
        p1_model = smplx_model(betas=re_p1["betas"], expression=re_p1["expression"], body_pose=re_p1["body_pose"], transl=re_p1["transl"], global_orient=re_p1["global_orient"], left_hand_pose=re_p1["left_hand_pose"], right_hand_pose=re_p1["right_hand_pose"], return_verts=True)
        p2_model = smplx_model(betas=re_p2["betas"], expression=re_p2["expression"], body_pose=re_p2["body_pose"], transl=re_p2["transl"], global_orient=re_p2["global_orient"], left_hand_pose=re_p2["left_hand_pose"], right_hand_pose=re_p2["right_hand_pose"], return_verts=True)
        p1_mesh = trimesh.Trimesh(vertices=p1_model.vertices.detach().cpu().numpy()[0], faces=p1_model.faces.detach().cpu().numpy())
        p2_mesh = trimesh.Trimesh(vertices=p2_model.vertices.detach().cpu().numpy()[0], faces=p2_model.faces.detach().cpu().numpy())
        video_v1.append((pyt3d_wrapper_view1.render_meshes([p1_mesh, re_seq_mesh, p2_mesh])[0] * 255).astype(np.uint8))
        video_v2.append((pyt3d_wrapper_view2.render_meshes([p1_mesh, re_seq_mesh, p2_mesh])[0] * 255).astype(np.uint8))
        video_v3.append((pyt3d_wrapper_view3.render_meshes([p1_mesh, re_seq_mesh, p2_mesh])[0] * 255).astype(np.uint8))
        video_v4.append((pyt3d_wrapper_view4.render_meshes([p1_mesh, re_seq_mesh, p2_mesh])[0] * 255).astype(np.uint8))

        # Original (un-retargeted) frame, rendered from view 1 only.
        origin_vert_seq = origin_vert @ origin_obj_pose[i + start_frame][:3, :3].T + origin_obj_pose[i + start_frame][:3, 3]
        # Renamed from `origin_mesh` to stop clobbering the loaded mesh object.
        origin_frame_mesh = trimesh.Trimesh(vertices=origin_vert_seq, faces=origin_face)
        ori_p1 = {k: origin_p1_SMPLX_params[k][i + start_frame].unsqueeze(0) for k in param_keys}
        ori_p2 = {k: origin_p2_SMPLX_params[k][i + start_frame].unsqueeze(0) for k in param_keys}
        ori_p1_model = smplx_model(**ori_p1, return_verts=True)
        ori_p2_model = smplx_model(**ori_p2, return_verts=True)
        ori_p1_mesh = trimesh.Trimesh(vertices=ori_p1_model.vertices.detach().cpu().numpy()[0], faces=ori_p1_model.faces.detach().cpu().numpy())
        ori_p2_mesh = trimesh.Trimesh(vertices=ori_p2_model.vertices.detach().cpu().numpy()[0], faces=ori_p2_model.faces.detach().cpu().numpy())
        if specified is not None:
            # Only the object mesh (middle entry) gets highlighted vertices.
            ori_render_result = pyt3d_wrapper_view1.render_meshes([ori_p1_mesh, origin_frame_mesh, ori_p2_mesh], specified_vertices=[[], specified[i], []])
        else:
            ori_render_result = pyt3d_wrapper_view1.render_meshes([ori_p1_mesh, origin_frame_mesh, ori_p2_mesh])
        ori_video.append((ori_render_result[0] * 255).astype(np.uint8))

    videos = [video_v1, video_v2, video_v3, video_v4]
    if len(ori_video) > 0:
        print("save video")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        ori_videoWriter = cv2.VideoWriter(join(save_pth, "ori_result.mp4"), fourcc, 10, (600, 450))
        for frame in ori_video:
            ori_videoWriter.write(frame)
        ori_videoWriter.release()

    if len(video_v1) > 0:
        print("save video")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        for view_idx, frames in enumerate(videos):
            out_path = join(save_pth, f"retarget_result_{view_idx}.mp4")
            videoWriter = cv2.VideoWriter(out_path, fourcc, 10, (600, 450))
            for frame in frames:
                videoWriter.write(frame)
            videoWriter.release()
            # fixed: report the file that was actually written (the old
            # message named a non-existent "retarget_result.mp4").
            print(f"done visualization {out_path}")

    # Tile the four retargeted views into a 2x2 comparison grid.
    clips_1 = [VideoFileClip(join(save_pth, "retarget_result_0.mp4")), VideoFileClip(join(save_pth, "retarget_result_1.mp4"))]
    clips_2 = [VideoFileClip(join(save_pth, "retarget_result_2.mp4")), VideoFileClip(join(save_pth, "retarget_result_3.mp4"))]
    grid = clips_array([clips_1, clips_2])
    grid.write_videofile(join(save_pth, save_filename))


def HOI_visualization_raw(obj_dataset_dir, data_dir, save_pth, start_frame, end_frame, device="cuda:0"):
    """Render the original (un-retargeted) HOI sequence to ori_result.mp4.

    Frames are processed in chunks of 50 to bound the memory held by the
    loaded SMPL-X parameter tensors.

    Args:
        obj_dataset_dir: object dataset root used to locate the object mesh.
        data_dir: capture dir containing aligned_objposes.npy and SMPLX_fitting/.
        save_pth: output directory for ori_result.mp4.
        start_frame, end_frame: frame range to render; end_frame is clamped
            to the number of available object poses.
        device: torch device string for the SMPL-X model.
    """
    origin_obj_name, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
    origin_mesh = trimesh.load_mesh(obj_data_path)
    # Load the pose sequence once (previously loaded twice just to clamp
    # end_frame).
    origin_obj_pose = np.load(join(data_dir, "aligned_objposes.npy"), allow_pickle=True)
    if len(origin_obj_pose) < end_frame:
        end_frame = len(origin_obj_pose)
        print("Warning: end_frame is larger than the length of object_result, set end_frame to {}".format(str(end_frame)))

    if origin_mesh.vertices.shape[0] > 10000:
        # fixed: simplify_mesh takes a single mesh (matches its usage in
        # HOI_visualization); the old double-argument double-assignment was
        # a copy-paste error.
        origin_mesh = simplify_mesh(origin_mesh)
        print("downsampled object mesh: vertices shape =", origin_mesh.vertices.shape)

    origin_vert, origin_face = origin_mesh.vertices, origin_mesh.faces

    smplx_model = smplx.create("/share/human_model/models", model_type="smplx", gender="neutral", use_face_contour=False, num_betas=10, num_expression_coeffs=10, ext="npz", use_pca=True, num_pca_comps=12, flat_hand_mean=True).to(device)
    pyt3d_wrapper = Pyt3DWrapper(image_size=(400, 300), use_fixed_cameras=False, eyes=[np.float32([0.0, 3.0, -3.0])], device="cuda:0")
    ori_video = []
    # SMPL-X parameter keys forwarded to the model.
    param_keys = ("betas", "expression", "body_pose", "transl", "global_orient", "left_hand_pose", "right_hand_pose")
    for j in range(start_frame, end_frame, 50):
        end_num = min(50, end_frame - j)
        multiperson_SMPLX_params = load_multiperson_smplx_params(join(data_dir, "SMPLX_fitting"), start_frame=j, end_frame=j + end_num, device=device)
        origin_p1_SMPLX_params = multiperson_SMPLX_params["person1"]
        origin_p2_SMPLX_params = multiperson_SMPLX_params["person2"]
        for i in range(0, end_num):
            # fixed: the object pose index must track the chunk offset j.
            # The old code used i + start_frame, which replayed the first
            # 50-frame window's object poses for every later chunk.
            frame_id = i + j
            origin_vert_seq = origin_vert @ origin_obj_pose[frame_id][:3, :3].T + origin_obj_pose[frame_id][:3, 3]
            obj_frame_mesh = trimesh.Trimesh(vertices=origin_vert_seq, faces=origin_face)
            ori_p1 = {k: origin_p1_SMPLX_params[k][i].unsqueeze(0) for k in param_keys}
            ori_p2 = {k: origin_p2_SMPLX_params[k][i].unsqueeze(0) for k in param_keys}
            ori_p1_model = smplx_model(**ori_p1, return_verts=True)
            ori_p2_model = smplx_model(**ori_p2, return_verts=True)
            ori_p1_mesh = trimesh.Trimesh(vertices=ori_p1_model.vertices.detach().cpu().numpy()[0], faces=ori_p1_model.faces.detach().cpu().numpy())
            ori_p2_mesh = trimesh.Trimesh(vertices=ori_p2_model.vertices.detach().cpu().numpy()[0], faces=ori_p2_model.faces.detach().cpu().numpy())
            ori_render_result = pyt3d_wrapper.render_meshes([ori_p1_mesh, obj_frame_mesh, ori_p2_mesh])
            ori_video.append((ori_render_result[0] * 255).astype(np.uint8))

    # fixed: guard on ori_video -- the old code tested `video`, a list that
    # was never appended to, so ori_result.mp4 was never written.
    if len(ori_video) > 0:
        print("save video")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        ori_videoWriter = cv2.VideoWriter(join(save_pth, "ori_result.mp4"), fourcc, 10, (400, 300))
        for frame in ori_video:
            ori_videoWriter.write(frame)
        ori_videoWriter.release()
        print(f"done visualization {join(save_pth, 'ori_result.mp4')}")


def obj_visualization(data_dir, obj_dataset_dir, obj_mesh_path, obj_pose_path, save_pth, start_frame, end_frame, device="cuda:0"):
    """Render original vs. retargeted object pose sequences side by side.

    Writes retarget_result.mp4 (retargeted object), ori_retarget_result.mp4
    (original object), and retarget.mp4 (the two side by side) under save_pth.

    Args:
        data_dir: capture dir containing aligned_objposes.npy.
        obj_dataset_dir: object dataset root used to locate the original mesh.
        obj_mesh_path: retargeted object mesh file.
        obj_pose_path: .npy of retargeted poses; per-frame dicts with
            "rotation" (axis-angle tensor) and "translation".
        save_pth: output directory.
        start_frame, end_frame: frame range to render.
        device: unused here; kept for interface consistency with the other
            visualization entry points.
    """
    origin_obj_name, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
    origin_mesh = trimesh.load_mesh(obj_data_path)
    origin_vert, origin_face = origin_mesh.vertices, origin_mesh.faces
    origin_obj_pose = np.load(join(data_dir, "aligned_objposes.npy"), allow_pickle=True)

    mesh = trimesh.load_mesh(obj_mesh_path)
    vert, face = mesh.vertices, mesh.faces
    obj_pose_seq = np.load(obj_pose_path, allow_pickle=True)
    intrinsic = np.array([[-600, 0, 640], [0, -600, 360], [0, 0, 1]])
    pyt3d_wrapper = Pyt3DWrapper(image_size=(1200, 900), use_fixed_cameras=False, eyes=[np.float32([0.0, 3.0, -3.0])], intrin=intrinsic, device="cuda:0")
    # Renamed from `video`/`ori_video`: the old `video` name was later
    # shadowed by the moviepy composite clip.
    retarget_frames = []
    ori_frames = []
    for i in range(start_frame, end_frame):
        # Original object: (4, 4) pose matrix applied to the original mesh.
        ori_vert_seq = origin_vert @ origin_obj_pose[i][:3, :3].T + origin_obj_pose[i][:3, 3]
        ori_mesh = trimesh.Trimesh(vertices=ori_vert_seq, faces=origin_face)
        ori_render_result = pyt3d_wrapper.render_meshes([ori_mesh])
        ori_frames.append((ori_render_result[0] * 255).astype(np.uint8))

        # Retargeted object: axis-angle rotation + translation dict.
        rot_vec = batch_rodrigues(obj_pose_seq[i]["rotation"]).cpu().numpy().squeeze()
        trans_vec = obj_pose_seq[i]["translation"].cpu().numpy().squeeze()
        re_vert = vert @ rot_vec.T + trans_vec
        new_mesh = trimesh.Trimesh(vertices=re_vert, faces=face)
        render_result = pyt3d_wrapper.render_meshes([new_mesh])
        retarget_frames.append((render_result[0] * 255).astype(np.uint8))

    if len(retarget_frames) > 0:
        print("save video")
        fourcc = cv2.VideoWriter_fourcc(*'mp4v')
        videoWriter = cv2.VideoWriter(join(save_pth, "retarget_result.mp4"), fourcc, 10, (1200, 900))
        ori_videoWriter = cv2.VideoWriter(join(save_pth, "ori_retarget_result.mp4"), fourcc, 10, (1200, 900))
        for frame in retarget_frames:
            videoWriter.write(frame)
        for frame in ori_frames:
            ori_videoWriter.write(frame)
        videoWriter.release()
        ori_videoWriter.release()

    # Place the two videos side by side for comparison.
    clips_1 = [VideoFileClip(join(save_pth, "retarget_result.mp4")), VideoFileClip(join(save_pth, "ori_retarget_result.mp4"))]
    side_by_side = clips_array([clips_1])
    side_by_side.write_videofile(join(save_pth, "retarget.mp4"))
    # fixed typo in completion message: "down" -> "done".
    print(f"done visualization {join(save_pth, 'retarget.mp4')}")




# if __name__ == "__main__":
#     obj_pose_seq = np.load('0806_1_012_12215.npy', allow_pickle=True)
#     q_mat = torch.from_numpy(np.array([[1.2, 0, 0, 0], [0, 2, 0, 0], [0, 0, 1.5, 0], [0, 0, 0, 1]]))
#     mesh = trimesh.load_mesh("/share/datasets/HHO_object_dataset_final/stick/stick6/stick006_m.obj")
#     vert, face = mesh.vertices, mesh.faces
#     re_vert = torch.matmul(torch.from_numpy(vert), q_mat[:3, :3].transpose(1, 0).type(torch.float64)) + q_mat[:3, 3]
#     pyt3d_wrapper = Pyt3DWrapper(image_size=(1280, 720), use_fixed_cameras=False, device="cuda:0")
#     data_dir = '/share/datasets/HHO_dataset/data/20230806_1/012/'
#     object_dir = join(data_dir, 'aligned_objposes.npy')
#     # 301 x 4 x 4
#     obj_pose_seq_2 = np.load(object_dir, allow_pickle=True)
#     video = []
#     ori_video = []
#     for i in tqdm(range(obj_pose_seq.shape[0])):
#         rot_vec = batch_rodrigues(obj_pose_seq[i]["rotation"]).cpu().numpy().squeeze()
#         trans_vec = obj_pose_seq[i]["translation"].cpu().numpy().squeeze()
        
#         # print(rot_vec @ obj_pose_seq_2[i][:3, :3])
#         # obj_pose = obj_pose_seq[i]
#         # print(obj_pose)
#         new_vert = re_vert @ rot_vec.T + trans_vec
#         new_mesh = trimesh.Trimesh(vertices=new_vert, faces=face)
#         # if 175 <= i <= 210:
#         #     trimesh.exchange.export.export_mesh(new_mesh, f"{i}.obj", file_type="obj")
#         # print(new_vert[:, 1].min())
#         specified_vert = np.where(new_vert[:, 1] < 0)
#         # print(specified_vert)
#         render_result = pyt3d_wrapper.render_meshes([new_mesh], specified_vertices=specified_vert)
#         img = (render_result[0]*255).astype(np.uint8)
#         img = draw_head_orientation(img, np.array([0, 0, 0]), np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), pyt3d_wrapper.intrin, pyt3d_wrapper.extrin)
#         video.append(img)

#         ori_pose = obj_pose_seq_2[i]
#         ori_vert = vert @ ori_pose[:3, :3].T + ori_pose[:3, 3]
#         ori_mesh = trimesh.Trimesh(vertices=ori_vert, faces=face)
#         ori_img = (pyt3d_wrapper.render_meshes([ori_mesh],)[0]*255).astype(np.uint8)
#         ori_img = draw_head_orientation(ori_img, np.array([0, 0, 0]), np.array([[1, 0, 0], [0, 1, 0], [0, 0, 1]]), pyt3d_wrapper.intrin, pyt3d_wrapper.extrin)
#         ori_video.append(ori_img)
    
#     print(np.array(video).shape)
#     io.write_video("0806_1_012_12215.mp4", torch.from_numpy(np.array(video)), 10)
#     io.write_video("0806_1_012_12215_ori.mp4", torch.from_numpy(np.array(ori_video)), 10)

#     clips = [VideoFileClip("0806_1_012_12215.mp4"),VideoFileClip("0806_1_012_12215_ori.mp4")]
#     video = clips_array([clips])
#     video.write_videofile('0806_1_012_12215_result.mp4')

if __name__ == "__main__":
    data_root_file = "/share/datasets/HHO_dataset/data/20230807_1/"
    obj_dataset_dir = "/share/datasets/HHO_object_dataset_final"
    for dir in os.listdir(data_root_file):
        # if not dir == '003':
        #     continue
        data_dir = join(data_root_file, dir)
        if isfile(data_dir):
            continue
        try:
            HOI_visualization_raw(obj_dataset_dir, data_dir, data_dir, 0, 300)
        except Exception as e:
            print(f"{e} in {data_dir}")
            continue