import os
from os.path import join, isfile, dirname, basename
import sys
sys.path.append("/home/liuyun/HHO-dataset/data_processing/Tink")
sys.path.append("/home/liuyun/HHO-dataset/data_processing/")
sys.path.append("/home/liuyun/codebases/HHO_VAE/")
from model.discriminator import PairNet
from config.config import load_config
import argparse
import numpy as np
import pickle
import torch
from torch import nn
import pytorch3d
import pytorch3d.io as IO
import trimesh
from smplx import smplx
import cv2
import imageio
from copy import deepcopy
from utils.txt2intrinsic import txt2intrinsic
from smplx.smplx.utils import Struct, to_tensor, to_np
from utils.pyt3d_wrapper import Pyt3DWrapper
from utils.avi2depth import avi2depth
from utils.time_align import time_align
from utils.process_timestamps import txt_to_paried_frameids, paired_frameids_to_txt
from utils.contact import compute_contact
from utils.VTS_object import get_obj_info, get_obj_name_correspondance
from utils.visualization import save_mesh
from utils.load_smplx_params import load_multiperson_smplx_params
from utils.object_retargeting import obj_retargeting, obj_retargeting_new_obj
from utils.contact import compute_contact_and_closest_point
from smplx.smplx.lbs import batch_rodrigues
from transforms3d.axangles import mat2axangle
import open3d as o3d
from optimization.utils import local_pose_to_global_orientation
from optimization.bvh2smplx import Simple_SMPLX, create_SMPLX_model
from utils.retargeting_visualization import HOI_visualization, obj_visualization
import matplotlib.pyplot as plt
import time
from utils.simplify_mesh import simplify_mesh
from utils.get_joints import get_joints
from tink.transform_contact_info import tranfer_contact_to_new_obj, get_obj_path
from tink.cal_contact_info import to_pointcloud
from tqdm import tqdm
import cv2
from moviepy.editor import VideoFileClip, clips_array
from utils.pyTorchChamferDistance.chamfer_distance import ChamferDistance
from utils.mesh import save_mesh
import json
import random



# Vertex indices on the SMPL-X mesh for the ten finger vertices used as
# contact anchors (left/right thumb..pinky).
# NOTE(review): values look like SMPL-X fingertip vertex ids — confirm against
# the SMPL-X template mesh before reusing them elsewhere.
HAND_VERT_IDS = {
    'lthumb':		5361,
    'lindex':		4933,
    'lmiddle':		5058,
    'lring':		5169,
    'lpinky':		5286,
    'rthumb':		8079,
    'rindex':		7669,
    'rmiddle':		7794,
    'rring':		7905,
    'rpinky':		8022,
}

# Load the neutral SMPL-X model once at import time and keep the raw arrays
# that get_joints() needs: blend shapes, template vertices, joint regressor,
# kinematic tree parents, and the first 12 hand-pose PCA components (matching
# num_pca_params=12 used in HOI_contact_visualize below).
model_path = "/share/human_model/models/smplx/SMPLX_NEUTRAL.npz"
model_data = np.load(model_path, allow_pickle=True)
data_struct = Struct(**model_data)
shapedirs = data_struct.shapedirs
v_template = data_struct.v_template
J_regressor = data_struct.J_regressor
parents = data_struct.kintree_table[0]
left_hand_components = data_struct.hands_componentsl[:12]
right_hand_components = data_struct.hands_componentsr[:12]

def map_obj_to_sdf(obj_name):
    """
    Map a HHO object name (e.g. "chair009") to its DeepSDF asset entry.

    Returns a dict {"id": <sdf asset id>, "type": "chair"|"table"}.
    Raises KeyError for object names without an SDF counterpart.
    """
    catalog = {}
    # lowercase-id chairs
    for num in (1, 2, 5, 6, 9, 10, 11, 12):
        catalog[f"chair{num:03d}"] = {"id": f"h01chair{num}", "type": "chair"}
    # chair022 uses a capitalized, zero-padded asset id
    catalog["chair022"] = {"id": "h01Chair022", "type": "chair"}
    # lowercase-id desks (all map to the "table" category)
    for num in (1, 2, 3, 5, 7, 8, 9):
        catalog[f"desk{num:03d}"] = {"id": f"h02desk{num}", "type": "table"}
    # capitalized, zero-padded desk asset ids
    for num in (20, 21, 23):
        catalog[f"desk{num:03d}"] = {"id": f"h02Desk{num:03d}", "type": "table"}
    return catalog[obj_name]

def create_empty_SMPLX_params(N, N_betas=10, N_expression=10, N_hand_pca=12, device="cuda:0"):
    """
    Build an all-zero SMPL-X parameter dict for N frames on the given device.

    Returned tensors are float32 with shapes:
    betas (N, N_betas), expression (N, N_expression), global_orient (N, 3),
    transl (N, 3), body_pose (N, 21, 3), left/right_hand_pose (N, N_hand_pca).
    """
    param_shapes = {
        "betas": (N, N_betas),
        "expression": (N, N_expression),
        "global_orient": (N, 3),
        "transl": (N, 3),
        "body_pose": (N, 21, 3),
        "left_hand_pose": (N, N_hand_pca),
        "right_hand_pose": (N, N_hand_pca),
    }
    return {
        name: torch.zeros(shape, dtype=torch.float32, device=device)
        for name, shape in param_shapes.items()
    }


class SMPLX_HH(nn.Module):
    """
    Two-person SMPL-X parameter container for HOI retargeting optimization.

    Only global_orient, body_pose and transl of each person are optimized
    (requires_grad=True); shape (betas), hand PCA coefficients, the shared
    expression, and the zero jaw/eye poses stay frozen.
    """

    # per-person parameter keys copied from init_smplx_params, in the same
    # registration order as the original implementation
    _PERSON_KEYS = ("betas", "global_orient", "body_pose", "left_hand_pose", "right_hand_pose", "transl")
    _TRAINABLE_KEYS = ("global_orient", "body_pose", "transl")
    # parameters the capture pipeline does not track; kept frozen at zero
    _FROZEN_ZERO_KEYS = ("jaw_pose", "leye_pose", "reye_pose")

    def __init__(self, smplx_model, init_smplx_params, cfg):
        """
        smplx_model: SMPL-X model instance (stored for reference; not called here)
        init_smplx_params: {"person1": {...}, "person2": {...}} of (T, ...) tensors
        cfg: unused here; kept for interface compatibility
        """
        super(SMPLX_HH, self).__init__()
        self.smplx_model = smplx_model
        # person1's expression is shared by both persons in forward()
        self.smplx_expression = nn.Parameter(init_smplx_params["person1"]["expression"].clone().detach(), requires_grad=False)
        for person in ("person1", "person2"):
            self._register_person_params(person, init_smplx_params[person])

    def _register_person_params(self, person, params):
        """Register per-person SMPL-X parameters as attributes named smplx_<key>_<person>."""
        for key in self._PERSON_KEYS:
            trainable = key in self._TRAINABLE_KEYS
            setattr(self, f"smplx_{key}_{person}",
                    nn.Parameter(params[key].clone().detach(), requires_grad=trainable))
        num_frames = params["betas"].shape[0]
        for key in self._FROZEN_ZERO_KEYS:
            setattr(self, f"smplx_{key}_{person}",
                    nn.Parameter(torch.zeros([num_frames, 3], dtype=torch.float32), requires_grad=False))

    def forward(self):
        """
        Run the differentiable joint regressor for both persons and return
        their joints together with the current parameter tensors.
        """
        result_joints_person1 = get_joints(self.smplx_global_orient_person1, self.smplx_betas_person1, self.smplx_body_pose_person1, self.smplx_transl_person1, self.smplx_left_hand_pose_person1, self.smplx_right_hand_pose_person1, left_hand_components, right_hand_components, shapedirs, v_template, J_regressor, parents)
        result_joints_person2 = get_joints(self.smplx_global_orient_person2, self.smplx_betas_person2, self.smplx_body_pose_person2, self.smplx_transl_person2, self.smplx_left_hand_pose_person2, self.smplx_right_hand_pose_person2, left_hand_components, right_hand_components, shapedirs, v_template, J_regressor, parents)
        results = {
            "person1": {
                # "vertices": result_vertices_person1,
                "joints": result_joints_person1,
                "betas": self.smplx_betas_person1,
                "expression": self.smplx_expression,
                "global_orient": self.smplx_global_orient_person1,
                "transl": self.smplx_transl_person1,
                "body_pose": self.smplx_body_pose_person1,
                "left_hand_pose": self.smplx_left_hand_pose_person1,
                "right_hand_pose": self.smplx_right_hand_pose_person1,
            },
            "person2": {
                # "vertices": result_vertices_person2,
                "joints": result_joints_person2,
                "betas": self.smplx_betas_person2,
                "expression": self.smplx_expression,
                "global_orient": self.smplx_global_orient_person2,
                "transl": self.smplx_transl_person2,
                "body_pose": self.smplx_body_pose_person2,
                "left_hand_pose": self.smplx_left_hand_pose_person2,
                "right_hand_pose": self.smplx_right_hand_pose_person2,
            }
        }
        return results


def compute_contact_info(human_params, smplx_model, idx, obj_vertices, threshould=0.05, device="cuda:0"):
    """
    Compute contact between one SMPL-X frame and an object point set.

    [input]
    * human_params: SMPLX params (dict of per-frame tensors)
    * idx: frame idx
    * obj_vertices: torch.float32, shape = (M, 3)

    human SMPLX mesh: shape = (N, 3)

    [return]
    * contact: torch.bool, shape = (N)
    * dist: torch.float32, shape = (N)
    * closest_point: torch.int64, shape = (N)
    """
    param_keys = ("betas", "expression", "global_orient", "transl",
                  "body_pose", "left_hand_pose", "right_hand_pose")
    # slice out a single-frame batch of every parameter, detached and on `device`
    frame_params = {key: human_params[key][idx:idx + 1].detach().to(device) for key in param_keys}
    result_model = smplx_model(return_verts=True, **frame_params)
    human_vertices = result_model.vertices[0]  # human vertices
    contact, dist, closest_point = compute_contact_and_closest_point(human_vertices, obj_vertices, threshould=threshould)
    return contact, dist, closest_point


def naive_contact_loss(person_origin_contact_info, ids, new_human_vertices, new_obj_vertices):
    """
    L1 penalty keeping each human vertex at its recorded distance from the
    object point it was originally in contact with.

    [input]
    * person_origin_contact_info: dict with "contact"/"dist"/"closest_point"
      tensors of shape (T, N_human_vertex)
    * ids: frame indices to evaluate (length B)
    * new_human_vertices: (B, N_human_vertex, 3)
    * new_obj_vertices: (B, M, 3) vertices of the (new) object per frame

    [return]
    * scalar tensor loss
    """
    # get contact_info from original data
    contact_flag = person_origin_contact_info["contact"][ids]  # (B, N_human_vertex)
    dist = person_origin_contact_info["dist"][ids]  # (B, N_human_vertex)
    closest_point = person_origin_contact_info["closest_point"][ids]  # (B, N_human_vertex)
    B, N_human_vertex = closest_point.shape

    # contact loss
    # TODO: use contact_flag !!!
    # Fix: place the row indices on the tensor being indexed; the original used
    # a module-global `device` that only exists when this file runs as __main__,
    # raising NameError when the function is imported.
    rows = torch.arange(0, B).unsqueeze(1).repeat(1, N_human_vertex).reshape(-1).to(new_obj_vertices.device)  # (B * N_human_vertex)
    matched_obj_points = new_obj_vertices[rows, closest_point.reshape(-1)].reshape(B, N_human_vertex, 3)
    real_diff = new_human_vertices - matched_obj_points  # (B, N_human_vertex, 3)
    # penalize |current distance - recorded distance| on flagged vertices
    contact_loss = torch.sum(contact_flag * torch.abs(torch.sum(real_diff**2, dim=-1)**(0.5) - dist))
    return contact_loss

def naive_joint_contact_loss(person_origin_contact_info, ids, new_human_joints, new_obj_vertices, origin_human_joints):
    """
    Squared-error contact loss restricted to the ten HAND_VERT_IDS vertices.

    For each selected frame, look up the object point originally assigned to
    each hand vertex and penalize the squared deviation of the current
    joint-to-object distance from the recorded contact distance.

    [input]
    * person_origin_contact_info: dict of (T, N_human_vertex) tensors
    * ids: frame indices to evaluate (length B)
    * new_human_joints: (B, >=71, 3); joints 61:71 are assumed to correspond to
      the 10 hand vertices — TODO confirm against get_joints() output layout
    * new_obj_vertices: (B, M, 3) target-object vertices
    * origin_human_joints: unused; kept for interface compatibility (it only
      fed a contact-direction term that was disabled in the original code)

    [return]
    * scalar tensor loss
    """
    dist = person_origin_contact_info["dist"][ids]
    closest_point = person_origin_contact_info["closest_point"][ids]
    B, N_human_vertex = closest_point.shape

    # contact loss on the ten hand vertices only
    hand_ids = list(HAND_VERT_IDS.values())
    hand_vertex_closest = closest_point[:, hand_ids]  # B x 10
    hand_vertex_dist = dist[:, hand_ids]  # B x 10
    new_obj_contact_vert = new_obj_vertices[torch.arange(B).unsqueeze(1), hand_vertex_closest]  # B x 10 x 3
    hand_vertex_diff_vec = new_human_joints[:, 61:71, :] - new_obj_contact_vert  # B x 10 x 3
    hand_vertex_contact_loss = torch.sum((torch.sum(hand_vertex_diff_vec**2, dim=-1)**(0.5) - hand_vertex_dist)**2)
    return hand_vertex_contact_loss

def contact_loss_xrf(person_origin_contact_info, ids, new_joint, new_vert):
    """
    L1 contact loss between hand joints 61:71 and their pre-assigned object
    points, weighted by the per-point contact flag.

    person_origin_contact_info holds "dist"/"closest_point"/"contact" tensors
    indexed by frame; each selected slice is B x 10.
    """
    # B x 10
    ref_dist = person_origin_contact_info["dist"][ids]
    ref_closest = person_origin_contact_info["closest_point"][ids]
    ref_flag = person_origin_contact_info["contact"][ids]
    batch = ref_closest.shape[0]

    batch_rows = torch.arange(batch).unsqueeze(1)
    contact_targets = new_vert[batch_rows, ref_closest]  # B x 10 x 3
    offsets = new_joint[:, 61:71, :] - contact_targets  # B x 10 x 3
    current_dist = torch.sum(offsets**2, dim=-1)**(0.5)
    return torch.sum(ref_flag * torch.abs(current_dist - ref_dist))

def contact_loss_chamfer(person_origin_contact_info, ids, new_joint, new_vert):
    """
    Symmetric chamfer loss between the hand-joint cloud (joints 61:71) and the
    pre-assigned object contact points, each direction weighted by the
    contact flag.
    """
    chamfer = ChamferDistance()
    ref_flag = person_origin_contact_info["contact"][ids]
    ref_closest = person_origin_contact_info["closest_point"][ids]
    batch = ref_closest.shape[0]

    joint_cloud = new_joint[:, 61:71, :].float()  # B x 10 x 3
    contact_cloud = new_vert[torch.arange(batch).unsqueeze(1), ref_closest].float()

    dist_fwd, dist_bwd = chamfer(joint_cloud, contact_cloud)
    return torch.mean(ref_flag * dist_fwd) + torch.mean(ref_flag * dist_bwd)



# new_contact_area["person1"] (B, contact_index)
def set_contact_area(person_origin_contact_info, ids, new_contact_area):
    """
    Overwrite the cached closest-point assignment for the given frames.

    [input]
    * person_origin_contact_info: dict holding a "closest_point" tensor of
      shape (T, N_human_vertex)
    * ids: frame idx list
    * new_contact_area: (T, N_human_vertex) tensor of replacement
      closest-point indices; the same `ids` rows are copied over

    Mutates person_origin_contact_info["closest_point"] in place; returns None.
    (The original docstring claimed return values and the body computed four
    unused locals — both removed.)
    """
    person_origin_contact_info["closest_point"][ids] = new_contact_area[ids]
    

def pointcloud_render(pointcloud, intrinsic, wrapper, index=None):
    """
    Project a point cloud onto a white 900x1200 canvas using the first camera
    of `wrapper` and the given 3x3 intrinsic matrix.

    Points are drawn red; the optional `index` list marks a subset blue (drawn
    last, so marked points overwrite red ones).  Note: projected u is used as
    the row index and v as the column index, matching the original behavior.
    """
    canvas = np.full((900, 1200, 3), 255.0)

    cam = wrapper.cameras[0]
    rot = cam.R.squeeze().detach().cpu().numpy()
    trans = cam.T.detach().cpu().numpy()
    cam_pose = np.eye(4)
    cam_pose[:3, :3] = rot.T
    cam_pose[:3, 3] = trans

    homog = np.concatenate((pointcloud, np.ones((pointcloud.shape[0], 1))), axis=-1)
    cam_pts = (homog @ np.linalg.inv(cam_pose).T)[:, :3]
    proj = cam_pts @ intrinsic.T
    pix = (proj[:, :2] / proj[:, 2:]).astype(np.int32)

    def _on_canvas(u, v):
        # bounds follow the original: u against height (900), v against width (1200)
        return 0 <= u < 900 and 0 <= v < 1200

    for u, v in pix:
        if _on_canvas(u, v):
            canvas[u, v] = [255, 0, 0]
    if index is not None:
        for i in index:
            if _on_canvas(pix[i, 0], pix[i, 1]):
                canvas[pix[i, 0], pix[i, 1]] = [0, 0, 255]

    return canvas


def find_closest_point(p, vert):
    """
    Return the index of the point in `vert` nearest to `p` (Euclidean distance).

    p: a point, shape = (3,)
    vert: a point cloud, shape = (N, 3)
    """
    offsets = vert - p.reshape(1, 3)
    distances = (offsets**2).sum(axis=-1)**(0.5)  # shape = (N,)
    return distances.argmin()

    

def HOI_contact_visualize(data_dir, save_dir, origin_mesh, ori_obj_name, obj_mesh, start_frame, end_frame, cfg, device="cuda:0", use_new_obj=False, new_obj_name=None, contact_vis=False, obj_dataset_dir=None):
    """
    Compute per-frame human-object contact for one clip, optionally render GT
    contact videos, and (when use_new_obj) transfer the contact areas onto a
    new target object via DeepSDF.

    At this stage only the human pose is optimized.
    (translated from the original Chinese docstring)

    [input]
    * data_dir: clip directory containing SMPLX_fitting/ and aligned_objposes.npy
    * save_dir: output directory (created if missing)
    * origin_mesh: trimesh mesh of the source object
    * ori_obj_name: source object name; must be known to map_obj_to_sdf()
    * obj_mesh: trimesh mesh of the target object
    * start_frame, end_frame: frame range [start_frame, end_frame)
    * contact_vis: when True, write <person>_gt_contact.mp4 into data_dir
    * new_obj_name: required (asserted non-None)

    Returns None (the caller currently assigns the result — NOTE(review): the
    assignment always receives None).
    """
    assert not new_obj_name is None
    
    start_time = time.perf_counter()
    os.makedirs(save_dir, exist_ok=True)
    # 12 hand-pose PCA components, matching the module-level hands_components[:12]
    use_pca, num_pca_params = True, 12

    # (1) load gt human poses and simplify obj_mesh
    try:
        multiperson_SMPLX_params = load_multiperson_smplx_params(join(data_dir, "SMPLX_fitting"), start_frame=start_frame, end_frame=end_frame, device=device)
    except Exception as e:
        raise e

    print(multiperson_SMPLX_params["person1"]["body_pose"].shape)
    print(multiperson_SMPLX_params.keys(), multiperson_SMPLX_params["person2"].keys(), multiperson_SMPLX_params["person2"]["body_pose"].shape, multiperson_SMPLX_params["person1"]["joints"].shape)

    count_time = time.perf_counter()
    print("time for loading data and simplifying meshes: {}s".format(str(count_time - start_time)))
    
    # (2) compute per-frame HOH meshes and compute contact

    if use_new_obj:
        # for a new object only a point cloud is needed (faces unused below)
        target_pcd = to_pointcloud(obj_mesh)
        vert = np.asarray(target_pcd.points)
    else:
        vert, face = obj_mesh.vertices, obj_mesh.faces


    origin_vert, origin_face = origin_mesh.vertices, origin_mesh.faces
    object_dir = join(data_dir, 'aligned_objposes.npy')
    # per-frame 4x4 object poses for the selected range
    origin_pose = np.load(object_dir, allow_pickle=True)[start_frame:end_frame]
    # transform rest-pose object vertices by each frame's rotation + translation
    origin_vert_seq = (np.array(origin_pose[:, :3, :3] @ origin_vert.T)).transpose(0, 2, 1) + np.expand_dims(origin_pose[:, :3, 3], axis=1)
    origin_vert_seq = torch.from_numpy(origin_vert_seq).to(device)  # (N, 3)
    
    ######################################################################
    # preprocess contact areas 10675 x 3
    print("start preparing contact areas ...")
    origin_contact_info = {
        "person1": {"contact": [], "dist": [], "closest_point": []},
        "person2": {"contact": [], "dist": [], "closest_point": []},
    }
    # contact distance threshold in meters (spelling follows compute_contact_info's parameter)
    contact_threshould=0.05
    # contact_threshould = 0.01
    smplx_model = create_SMPLX_model(use_pca=use_pca, num_pca_comps=num_pca_params, batch_size=1, device=device)

    # per-frame contact of each person against the posed original object
    for idx in range(0, end_frame - start_frame):
        # person1 to original obj
        
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person1"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshould, device=device)
        origin_contact_info["person1"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person1"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person1"]["closest_point"].append(closest_point.detach().cpu().numpy())
        
        # person2 to original obj
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person2"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshould, device=device)
        origin_contact_info["person2"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person2"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person2"]["closest_point"].append(closest_point.detach().cpu().numpy())
        

    # stack the per-frame lists into (F, N_human_vertex) tensors on `device`
    for person in origin_contact_info:
        origin_contact_info[person]["contact"] = torch.tensor(origin_contact_info[person]["contact"], dtype=torch.bool).to(device)
        origin_contact_info[person]["dist"] = torch.tensor(origin_contact_info[person]["dist"], dtype=torch.float32).to(device)
        origin_contact_info[person]["closest_point"] = torch.tensor(origin_contact_info[person]["closest_point"], dtype=torch.int64).to(device)
    print("finish preparing contact areas !!!")

    print(origin_contact_info["person1"]["contact"].shape, origin_contact_info["person1"]["dist"].shape, origin_contact_info["person1"]["closest_point"].shape)
    
    ###########################################################################################################################
    # liuyun: visualize original contact areas (for reference motion)
    
    # NOTE(review): negative focal lengths — presumably to flip the image axes; confirm against Pyt3DWrapper's convention
    intrinsic = np.array([[-600, 0, 640], [0, -600, 360], [0, 0, 1]])
    pyt3d_wrapper_view1 = Pyt3DWrapper(image_size=(1200, 900), use_fixed_cameras=False, eyes=[np.float32([0.0, -4.0, 4.0])], intrin=intrinsic, device="cuda:0")
    
    # hand_vertex_indices
    hand_vertex_indices = list(HAND_VERT_IDS.values())
    assert len(hand_vertex_indices) == 10
    
    if contact_vis:
        for person in origin_contact_info:
            # NOTE(review): contact_o has end_frame - start_frame rows but is
            # indexed with idx in [start_frame, end_frame) below — this only
            # lines up when start_frame == 0; confirm.
            contact_o = origin_contact_info[person]["closest_point"]
            
            origin_pcd = o3d.geometry.PointCloud()
            origin_pcd.points = o3d.utility.Vector3dVector(origin_vert)
            pcd_tree = o3d.geometry.KDTreeFlann(origin_pcd)
            # origin_vert: origin_mesh.vertices (vertices of the GT motion's object mesh)
            # NOTE(review): hard-coded rigid transform to place the object in
            # front of the fixed camera for rendering — provenance unknown, confirm
            draw_vert = (origin_vert @ np.array([[-0.01238667,-0.02917804,0.99949747],[-0.02601937,0.9992451,0.02884821],[-0.99958469,-0.02564896,-0.01313651]]).T+np.array([[-0.48912618,0.3995658,-0.29797912]])) @ np.array([[0, -1, 0], [-0.966, 0, 0.26], [-0.26, 0, -0.966]]) + np.array([[1.0, 1.0, 1.0]])
            imgs_original_contact_visualization = []
            
            for idx in range(start_frame, end_frame):
                # collect, for each of the 10 hand vertices, all object points
                # within 2cm of its assigned contact point, then render them blue
                all_index = []
                for i in range(10):
                    [k, index, _] = pcd_tree.search_radius_vector_3d(origin_pcd.points[contact_o[idx][hand_vertex_indices[i]]], 0.02)
                    all_index.extend(index)
                img1 = pointcloud_render(draw_vert, intrinsic, pyt3d_wrapper_view1, index=all_index)
                # crop the region of interest and upscale back to 1200x900
                img1 = img1[450:, 400:1000, :]
                img1 = cv2.resize(img1, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
                imgs_original_contact_visualization.append(img1.astype(np.uint8))
                
            print("save video ...")
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            videoWriter = cv2.VideoWriter(join(data_dir, f"{person}_gt_contact.mp4"), fourcc, 10, (1200, 900))
            for frame in imgs_original_contact_visualization:
                videoWriter.write(frame)
            videoWriter.release()
            print(f"done visualizing gt contact areas to {join(data_dir, f'{person}_gt_contact.mp4')} !!!")
    
    ###########################################################################################################################
    
    if use_new_obj:
        target_pcd = to_pointcloud(obj_mesh)
        print("transfering contact areas ...")

        # restrict the contact info to the 10 hand vertices
        contact_points = {
            "person1": {"dist": [], "closest_point": [], "contact": []},
            "person2": {"dist": [], "closest_point": [], "contact": []},
        }

        for person in contact_points:
            contact_points[person]["dist"] = origin_contact_info[person]["dist"][:, list(HAND_VERT_IDS.values())].detach().cpu().numpy() # N x 10
            contact_points[person]["closest_point"] = origin_contact_info[person]["closest_point"][:, list(HAND_VERT_IDS.values())].detach().cpu().numpy() # N x 10
            contact_points[person]["contact"] = origin_contact_info[person]["contact"][:, list(HAND_VERT_IDS.values())].detach().cpu().numpy()

        # unique object vertex ids ever touched by each person's hand vertices
        contact_area = {
            "person1": [],
            "person2": [],
        }
        for person in contact_area:
            contact_area[person] = np.unique(contact_points[person]["closest_point"])

        # NOTE(review): shallow copy, and new_contact_info is never used afterwards
        new_contact_info = contact_points.copy()
        for person in origin_contact_info:
            source_contact = contact_area[person]
            # input: source contact, list, output: source contact, list; target contact, list
            os.makedirs(join(save_dir, new_obj_name + "_contact"), exist_ok = True)
            _, target_contact = tranfer_contact_to_new_obj("/home/liuyun/HHO-dataset/data_processing/Tink/DeepSDF_OakInk/data/sdf/", map_obj_to_sdf(ori_obj_name)["id"], new_obj_name, origin_mesh, map_obj_to_sdf(ori_obj_name)["type"], source_contact, person, visualize=True, vis_save_file=join(save_dir, new_obj_name + "_contact"))
            # TODO: handle points that are too far away


def parse_args():
    """Parse command-line options for the batch HOI retargeting script."""
    arg_parser = argparse.ArgumentParser()
    ############# data ########################
    # arg_parser.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230807_2/002")  # carrying a table
    # arg_parser.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230807_2/004")  # rotating a table
    arg_parser.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230807_1/017")  # carrying a stick
    # arg_parser.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230805_1/002")  # rotating a chair
    # frame range [start_frame, end_frame)
    arg_parser.add_argument('--start_frame', '-s', type=int, default=0)
    arg_parser.add_argument('--end_frame', '-e', type=int, default=300)
    arg_parser.add_argument('--save_filename', type=str, default="compare_result.mp4")
    arg_parser.add_argument('--device', type=str, default="cuda:0")
    # per-axis object scaling
    arg_parser.add_argument('--scale_x', type=float, default=1.0)
    arg_parser.add_argument('--scale_y', type=float, default=1.0)
    arg_parser.add_argument('--scale_z', type=float, default=1.0)
    arg_parser.add_argument('--use_new_obj', action="store_true")  # when set, retarget onto a new object
    arg_parser.add_argument('--new_obj_id', type=str, default="")
    arg_parser.add_argument('--force_retarget_obj', action="store_true")  # when set, force re-running obj_retargeting
    arg_parser.add_argument('--objpose_filename', type=str, default="obj_poses.npy")
    arg_parser.add_argument('--objmesh_filename', type=str, default="new_obj.obj")
    arg_parser.add_argument('--npy_save_filename', type=str, default="HOI_retargeting_result.npy")
    return arg_parser.parse_args()


if __name__ == '__main__':
    obj_dataset_dir = "/data3/datasets/HHO_object_dataset_final/"
    hho_dataset_dir = "/share/datasets/hhodataset/VTS/"
    # obj_data_path = "test/chair10_m.obj"
    cfg = {
            "retarget_person1": True,
            "retarget_person2": True,
    }
    
    # names of the virtual (DeepSDF) objects available as retargeting targets
    virtual_meta = list(json.load(open("/home/liuyun/HHO-dataset/data_processing/Tink/DeepSDF_OakInk/data/meta/virtual_object_id.json", "r")).keys())
    
    args = parse_args()
    # walk every sequence/clip in the dataset (loop vars renamed from `dir`,
    # which shadowed the builtin, and from the reused `data_dir`)
    for seq_name in os.listdir(hho_dataset_dir):
        if not os.path.isdir(join(hho_dataset_dir, seq_name)):
            print("skipping...")
            continue
        for clip_name in os.listdir(join(hho_dataset_dir, seq_name)):
            # clips without aligned object poses cannot be processed
            if not isfile(join(hho_dataset_dir, seq_name, clip_name, 'aligned_objposes.npy')):
                print("skipping...")
                continue
            
            data_dir = join(hho_dataset_dir, seq_name, clip_name)
            
            # skip retargeted clips
            if "20231020" in data_dir and ("92" in data_dir or "117" in data_dir or "130" in data_dir or "118" in data_dir or "93" in data_dir or "122" in data_dir or "129" in data_dir):
                continue
            
            obj_retargeting_save_dir = join(data_dir, "obj_retargeting")
            HOI_retargeting_save_dir = join(data_dir, "HOI_retargeting")

            obj_name, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
            print(obj_name, obj_data_path)
            
            obj_name = obj_name.lower()

            if not (("chair" in obj_name) or ("desk" in obj_name)):
                print(f"skipping {obj_name}...")
                continue
            
            # skip objects without an SDF counterpart (narrowed from a bare except)
            try:
                sdf_id = map_obj_to_sdf(obj_name)["id"]
            except KeyError:
                print(f"skipping {obj_name}...")
                continue
                 
            if not isfile(obj_data_path):
                print(f"skipping {obj_name}...")
                continue
            
            device = args.device
            start_frame, end_frame = args.start_frame, args.end_frame
            
            # load the pose sequence once (the original loaded it twice)
            obj_pose_seq = np.load(join(data_dir, 'aligned_objposes.npy'), allow_pickle=True)
            if len(obj_pose_seq) < end_frame:
                end_frame = len(obj_pose_seq)
                print("Warning: end_frame is larger than the length of object_result, set end_frame to {}".format(str(end_frame)))

            print(data_dir, "[", start_frame, end_frame, ")")
            
            # Fix: the original compared the whole dict returned by
            # map_obj_to_sdf(obj_name) against the string k (always unequal),
            # so the source object itself was never filtered out of the
            # candidate targets; compare its sdf id instead.
            if ("chair" in obj_name):
                filtered_meta = [k for k in virtual_meta if sdf_id != k and (("chair" in k) or ("Chair" in k))]
            elif ("desk" in obj_name):
                filtered_meta = [k for k in virtual_meta if sdf_id != k and (("desk" in k) or ("Desk" in k) or ("table" in k))]
                
            random.shuffle(filtered_meta)
            
            # retarget each clip onto two randomly chosen target objects
            for new_obj_name in filtered_meta[:2]:

                target_pcd_save_file = join(HOI_retargeting_save_dir, f"{new_obj_name}_target_contact.pcd")
                
                if args.use_new_obj:
                    # new_obj_name = args.new_obj_id
                    print(obj_name, " retarget to ", new_obj_name)
                
                
                target_data_path = get_obj_path(new_obj_name)
                
                # obj_visualization(data_dir, obj_dataset_dir, objmesh_save_file, objpose_save_file, data_dir, start_frame, end_frame, device="cuda:0")

                # print(objmesh_save_file)
                origin_mesh = trimesh.load_mesh(obj_data_path)
                obj_mesh = trimesh.load_mesh(target_data_path)

                # NOTE(review): HOI_contact_visualize currently returns None
                HOI_retargeting_result = HOI_contact_visualize(data_dir, HOI_retargeting_save_dir, origin_mesh, obj_name, obj_mesh, start_frame=start_frame, end_frame=end_frame, cfg=cfg, new_obj_name=new_obj_name, device=device, use_new_obj=args.use_new_obj, obj_dataset_dir=obj_dataset_dir)
            
                # visualization
                # HOI_retargeting_result = np.load(join(HOI_retargeting_save_dir, "HOI_retargeting_result.npy"), allow_pickle=True)
                # HOI_visualization(result_save_dir, objpose_save_file, objmesh_save_file, obj_dataset_dir, data_dir, data_dir, start_frame, end_frame, save_filename=video_save_file, device="cuda:0")