import os
from os.path import join, isfile, dirname, basename
import sys
sys.path.append("/home/liuyun/HHO-dataset/data_processing/Tink")
sys.path.append("/home/liuyun/HHO-dataset/data_processing/")
sys.path.append("/home/liuyun/codebases/HHO_VAE/")
from model.reward_model import RewardModel
from config.config import load_config
import argparse
import numpy as np
import pickle
import torch
from torch import nn
import pytorch3d
import pytorch3d.io as IO
import trimesh
from smplx import smplx
import cv2
import imageio
from copy import deepcopy
from utils.txt2intrinsic import txt2intrinsic
from smplx.smplx.utils import Struct, to_tensor, to_np
from utils.pyt3d_wrapper import Pyt3DWrapper
from utils.avi2depth import avi2depth
from utils.time_align import time_align
from utils.process_timestamps import txt_to_paried_frameids, paired_frameids_to_txt
from utils.contact import compute_contact
from utils.VTS_object import get_obj_info
from utils.visualization import save_mesh
from utils.load_smplx_params import load_multiperson_smplx_params
from utils.object_retargeting import obj_retargeting, obj_retargeting_new_obj
from utils.contact import compute_contact_and_closest_point
from smplx.smplx.lbs import batch_rodrigues
from transforms3d.axangles import mat2axangle
import open3d as o3d
from optimization.utils import local_pose_to_global_orientation
from optimization.bvh2smplx import Simple_SMPLX, create_SMPLX_model
from utils.retargeting_visualization import HOI_visualization, obj_visualization
import matplotlib.pyplot as plt
import time
from utils.simplify_mesh import simplify_mesh
from utils.get_joints import get_joints, get_joints_no_datatype_change
from tink.transform_contact_info import tranfer_contact_to_new_obj, get_obj_path, vis_pcd
from tink.cal_contact_info import to_pointcloud
from tqdm import tqdm
import cv2
from moviepy.editor import VideoFileClip, clips_array
from utils.pyTorchChamferDistance.chamfer_distance import ChamferDistance
from utils.mesh import save_mesh


# SMPL-X mesh vertex indices at the fingertip of each digit, keyed by
# side prefix ('l'/'r') + finger name.
HAND_VERT_IDS = {
    'lthumb': 5361,
    'lindex': 4933,
    'lmiddle': 5058,
    'lring': 5169,
    'lpinky': 5286,
    'rthumb': 8079,
    'rindex': 7669,
    'rmiddle': 7794,
    'rring': 7905,
    'rpinky': 8022,
}

# Per-hand subsets, split out of the combined table by side prefix.
LEFT_VERT_IDS = {name: vid for name, vid in HAND_VERT_IDS.items() if name.startswith('l')}
RIGHT_VERT_IDS = {name: vid for name, vid in HAND_VERT_IDS.items() if name.startswith('r')}

# Load the SMPL-X neutral model once at import time; the arrays below are
# shared by every function/class in this module.
# NOTE(review): hard-coded absolute path — assumes this machine's layout;
# confirm before reusing elsewhere.
model_path = "/share/human_model/models/smplx/SMPLX_NEUTRAL.npz"
model_data = np.load(model_path, allow_pickle=True)
data_struct = Struct(**model_data)
shapedirs = data_struct.shapedirs  # shape blend-shape basis (SMPL-X convention)
v_template = data_struct.v_template  # template mesh vertices
J_regressor = data_struct.J_regressor  # vertices -> joints regression matrix
parents = data_struct.kintree_table[0]  # kinematic-tree parent index per joint
# Keep only the first 12 PCA components of each hand's pose basis
# (matches N_hand_pca=12 used throughout this file).
left_hand_components = data_struct.hands_componentsl[:12]
right_hand_components = data_struct.hands_componentsr[:12]

# Dataset object name -> SDF asset id/category. Built once at module load so
# map_obj_to_sdf() does not rebuild the table on every call.
_OBJ_SDF_MAP = {
    "chair009": {"id": "h01chair9", "type": "chair"},
    "chair005": {"id": "h01chair5", "type": "chair"},
    "chair002": {"id": "h01chair2", "type": "chair"},
    "chair001": {"id": "h01chair1", "type": "chair"},
    "chair006": {"id": "h01chair6", "type": "chair"},
    "chair010": {"id": "h01chair10", "type": "chair"},
    "chair011": {"id": "h01chair11", "type": "chair"},
    "chair012": {"id": "h01chair12", "type": "chair"},
    "chair022": {"id": "h01Chair022", "type": "chair"},
    "desk001": {"id": "h02desk1", "type": "table"},
    "desk002": {"id": "h02desk2", "type": "table"},
    "desk003": {"id": "h02desk3", "type": "table"},
    "desk005": {"id": "h02desk5", "type": "table"},
    "desk007": {"id": "h02desk7", "type": "table"},
    "desk008": {"id": "h02desk8", "type": "table"},
    "desk009": {"id": "h02desk9", "type": "table"},
    "Desk020": {"id": "h02Desk020", "type": "table"},
    "Desk021": {"id": "h02Desk021", "type": "table"},
    "Desk023": {"id": "h02Desk023", "type": "table"},
}

def map_obj_to_sdf(obj_name):
    """Map a dataset object name to its SDF asset descriptor.

    Args:
        obj_name: object name as used in the dataset (e.g. "chair009").

    Returns:
        A fresh dict {"id": <sdf asset id>, "type": "chair"|"table"}.
        (A copy is returned so callers may mutate it safely, matching the
        original per-call-literal behavior.)

    Raises:
        KeyError: if obj_name is not a known object.
    """
    try:
        # Copy so the shared module-level table cannot be mutated by callers.
        return dict(_OBJ_SDF_MAP[obj_name])
    except KeyError:
        raise KeyError(
            "unknown object name: {!r}; expected one of {}".format(
                obj_name, sorted(_OBJ_SDF_MAP))) from None

def create_empty_SMPLX_params(N, N_betas=10, N_expression=10, N_hand_pca=12, device="cuda:0"):
    """Allocate zero-initialized SMPL-X parameter tensors for N frames.

    Args:
        N: number of frames (batch dimension).
        N_betas: number of shape coefficients per frame.
        N_expression: number of expression coefficients per frame.
        N_hand_pca: number of hand-pose PCA coefficients per hand.
        device: torch device for all tensors.

    Returns:
        Dict of float32 zero tensors: betas (N, N_betas), expression
        (N, N_expression), global_orient (N, 3), transl (N, 3),
        body_pose (N, 21, 3), left/right_hand_pose (N, N_hand_pca).
    """
    def _zeros(*shape):
        # Allocate directly on the target device (avoids a CPU alloc + copy)
        # with an explicit dtype so every entry is consistently float32.
        return torch.zeros(shape, dtype=torch.float32, device=device)

    return {
        "betas": _zeros(N, N_betas),
        "expression": _zeros(N, N_expression),
        "global_orient": _zeros(N, 3),
        "transl": _zeros(N, 3),
        "body_pose": _zeros(N, 21, 3),
        "left_hand_pose": _zeros(N, N_hand_pca),
        "right_hand_pose": _zeros(N, N_hand_pca),
    }


class SMPLX_HH(nn.Module):
    """Optimizable two-person SMPL-X parameter container.

    Wraps the SMPL-X parameters of two people as ``nn.Parameter``s so they can
    be optimized jointly. Only global orientation, body pose and translation
    are trainable; betas, expression, hand poses and face/eye poses are frozen.
    ``forward`` regresses joints for both people using the module-level model
    data (shapedirs, J_regressor, ...) loaded at the top of this file.
    """
    def __init__(self, smplx_model, init_smplx_params, cfg, device="cuda:0"):
        # init_smplx_params: {"person1": {...}, "person2": {...}}, each a dict
        # of SMPL-X parameter tensors; values are cloned/detached so the
        # caller's tensors are never modified by the optimizer.
        # NOTE(review): `cfg` is accepted but never used in this class, and
        # `smplx_model` is only stored (forward() uses the hand-rolled joint
        # regressor instead) — confirm whether both are still needed.
        super(SMPLX_HH, self).__init__()
        self.smplx_model = smplx_model
        # Shared expression: person1's expression tensor is reported for BOTH
        # people in forward().
        self.smplx_expression = nn.Parameter(init_smplx_params["person1"]["expression"].clone().detach(), requires_grad=False)

        # --- person 1: only global_orient / body_pose / transl are trainable ---
        self.smplx_betas_person1 = nn.Parameter(init_smplx_params["person1"]["betas"].clone().detach(), requires_grad=False)
        self.smplx_global_orient_person1 = nn.Parameter(init_smplx_params["person1"]["global_orient"].clone().detach(), requires_grad=True)
        self.smplx_body_pose_person1 = nn.Parameter(init_smplx_params["person1"]["body_pose"].clone().detach(), requires_grad=True)
        self.smplx_left_hand_pose_person1 = nn.Parameter(init_smplx_params["person1"]["left_hand_pose"].clone().detach(), requires_grad=False)
        self.smplx_right_hand_pose_person1 = nn.Parameter(init_smplx_params["person1"]["right_hand_pose"].clone().detach(), requires_grad=False)
        self.smplx_transl_person1 = nn.Parameter(init_smplx_params["person1"]["transl"].clone().detach(), requires_grad=True)
        # Jaw/eye poses are fixed to zero (not part of the optimization).
        self.smplx_jaw_pose_person1 = nn.Parameter(torch.zeros([self.smplx_betas_person1.shape[0], 3], dtype=torch.float32), requires_grad=False)
        self.smplx_leye_pose_person1 = nn.Parameter(torch.zeros([self.smplx_betas_person1.shape[0], 3], dtype=torch.float32), requires_grad=False)
        self.smplx_reye_pose_person1 = nn.Parameter(torch.zeros([self.smplx_betas_person1.shape[0], 3], dtype=torch.float32), requires_grad=False)

        # --- person 2: same parameter layout and trainability as person 1 ---
        self.smplx_betas_person2 = nn.Parameter(init_smplx_params["person2"]["betas"].clone().detach(), requires_grad=False)
        self.smplx_global_orient_person2 = nn.Parameter(init_smplx_params["person2"]["global_orient"].clone().detach(), requires_grad=True)
        self.smplx_body_pose_person2 = nn.Parameter(init_smplx_params["person2"]["body_pose"].clone().detach(), requires_grad=True)
        self.smplx_left_hand_pose_person2 = nn.Parameter(init_smplx_params["person2"]["left_hand_pose"].clone().detach(), requires_grad=False)
        self.smplx_right_hand_pose_person2 = nn.Parameter(init_smplx_params["person2"]["right_hand_pose"].clone().detach(), requires_grad=False)
        self.smplx_transl_person2 = nn.Parameter(init_smplx_params["person2"]["transl"].clone().detach(), requires_grad=True)
        self.smplx_jaw_pose_person2 = nn.Parameter(torch.zeros([self.smplx_betas_person2.shape[0], 3], dtype=torch.float32), requires_grad=False)
        self.smplx_leye_pose_person2 = nn.Parameter(torch.zeros([self.smplx_betas_person2.shape[0], 3], dtype=torch.float32), requires_grad=False)
        self.smplx_reye_pose_person2 = nn.Parameter(torch.zeros([self.smplx_betas_person2.shape[0], 3], dtype=torch.float32), requires_grad=False)
        
        self.device = device
        
        # speed up: pre-convert the module-level SMPL-X model data to device
        # tensors once, so forward() avoids repeated numpy->tensor conversion.
        # shapedirs is truncated to the number of betas actually used.
        self.shapedirs = to_tensor(to_np(shapedirs[:, :, :init_smplx_params["person1"]["betas"].shape[1]]), dtype=torch.float32).to(device)
        self.v_template = to_tensor(to_np(v_template), dtype=torch.float32).to(device)
        self.J_regressor = to_tensor(to_np(J_regressor), dtype=torch.float32).to(device)
        self.parents = to_tensor(to_np(parents)).long().to(device)
        self.parents[0] = -1  # root joint has no parent
        # Parents of the 16 extra joints appended after the base kinematic tree.
        # NOTE(review): indices assume the joint layout used by
        # get_joints_no_datatype_change — confirm against utils.get_joints.
        self.extra_joint_parents = to_tensor(to_np([10, 10, 10, 11, 11, 11, 39, 27, 30, 36, 33, 54, 42, 45, 51, 48])).long().to(device)
        self.parents = torch.cat([self.parents, self.extra_joint_parents], dim=0)
        # First 12 PCA components of each hand's pose basis (module-level).
        self.left_pca, self.right_pca = to_tensor(to_np(left_hand_components), dtype=torch.float32).to(device), to_tensor(to_np(right_hand_components), dtype=torch.float32).to(device)
        
    
    def forward(self):
        """Regress joints for both people from the current parameters.

        Returns:
            {"person1": {...}, "person2": {...}} where each inner dict holds
            the regressed "joints" plus the current parameter tensors.
            Vertices are intentionally not computed here (speed).
        """
        # Slower reference path via the full get_joints, kept for comparison:
        # result_joints_person1 = get_joints(self.smplx_global_orient_person1, self.smplx_betas_person1, self.smplx_body_pose_person1, self.smplx_transl_person1, self.smplx_left_hand_pose_person1, self.smplx_right_hand_pose_person1, left_hand_components, right_hand_components, shapedirs, v_template, J_regressor, parents)
        # result_joints_person2 = get_joints(self.smplx_global_orient_person2, self.smplx_betas_person2, self.smplx_body_pose_person2, self.smplx_transl_person2, self.smplx_left_hand_pose_person2, self.smplx_right_hand_pose_person2, left_hand_components, right_hand_components, shapedirs, v_template, J_regressor, parents)
        result_joints_person1 = get_joints_no_datatype_change(self.smplx_global_orient_person1, self.smplx_betas_person1, self.smplx_body_pose_person1, self.smplx_transl_person1, self.smplx_left_hand_pose_person1, self.smplx_right_hand_pose_person1, self.left_pca, self.right_pca, self.shapedirs, self.v_template, self.J_regressor, self.parents, device=self.device)
        result_joints_person2 = get_joints_no_datatype_change(self.smplx_global_orient_person2, self.smplx_betas_person2, self.smplx_body_pose_person2, self.smplx_transl_person2, self.smplx_left_hand_pose_person2, self.smplx_right_hand_pose_person2, self.left_pca, self.right_pca, self.shapedirs, self.v_template, self.J_regressor, self.parents, device=self.device)
        results = {
            "person1": {
                # "vertices": result_vertices_person1,
                "joints": result_joints_person1,
                "betas": self.smplx_betas_person1,
                "expression": self.smplx_expression,
                "global_orient": self.smplx_global_orient_person1,
                "transl": self.smplx_transl_person1,
                "body_pose": self.smplx_body_pose_person1,
                "left_hand_pose": self.smplx_left_hand_pose_person1,
                "right_hand_pose": self.smplx_right_hand_pose_person1,
            },
            "person2": {
                # "vertices": result_vertices_person2,
                "joints": result_joints_person2,
                "betas": self.smplx_betas_person2,
                "expression": self.smplx_expression,
                "global_orient": self.smplx_global_orient_person2,
                "transl": self.smplx_transl_person2,
                "body_pose": self.smplx_body_pose_person2,
                "left_hand_pose": self.smplx_left_hand_pose_person2,
                "right_hand_pose": self.smplx_right_hand_pose_person2,
            }
        }
        return results


def compute_contact_info(human_params, smplx_model, idx, obj_vertices, threshould=0.05, device="cuda:0"):
    """Run SMPL-X for one frame and measure human-object contact.

    [input]
    * human_params: dict of SMPL-X parameter tensors, indexed per frame
    * smplx_model: SMPL-X model callable returning vertices
    * idx: frame index
    * obj_vertices: torch.float32, shape = (M, 3)

    human SMPLX mesh: shape = (N, 3)

    [return]
    * contact: torch.bool, shape = (N)
    * dist: torch.float32, shape = (N)
    * closest_point: torch.int64, shape = (N)
    """
    frame = slice(idx, idx + 1)
    # Slice out the single frame for every SMPL-X parameter, detached and
    # moved to the target device, then forward through the body model.
    kwargs = {
        name: human_params[name][frame].detach().to(device)
        for name in ("betas", "expression", "global_orient", "transl",
                     "body_pose", "left_hand_pose", "right_hand_pose")
    }
    result_model = smplx_model(return_verts=True, **kwargs)
    human_vertices = result_model.vertices[0]  # (N, 3) posed human vertices
    contact, dist, closest_point = compute_contact_and_closest_point(
        human_vertices, obj_vertices, threshould=threshould)
    return contact, dist, closest_point


def naive_contact_loss(person_origin_contact_info, ids, new_human_vertices, new_obj_vertices):
    """Penalize changes in human-vertex-to-object distances after retargeting.

    Args:
        person_origin_contact_info: dict with whole-sequence tensors
            "contact" (T, N), "dist" (T, N), "closest_point" (T, N).
        ids: frame indices selecting the current batch of B frames.
        new_human_vertices: (B, N, 3) retargeted human vertices.
        new_obj_vertices: (B, M, 3) retargeted object vertices.

    Returns:
        Scalar tensor: sum over vertices of contact_flag * |new_dist - dist|,
        where new_dist is the distance from each human vertex to its
        originally-closest object point.
    """
    # get contact_info from original data
    contact_flag = person_origin_contact_info["contact"][ids]  # (B, N_human_vertex)
    dist = person_origin_contact_info["dist"][ids]  # (B, N_human_vertex)
    closest_point = person_origin_contact_info["closest_point"][ids]  # (B, N_human_vertex)
    B, N_human_vertex = closest_point.shape

    # contact loss
    # TODO: use contact_flag as a hard mask (currently used as a soft weight)
    # Batch-row indices for gathering the original closest object points.
    # Bugfix: derive the device from the tensors themselves instead of a
    # module-level `device` global that is not defined in this scope.
    rows = torch.arange(0, B, device=closest_point.device).unsqueeze(1).repeat(1, N_human_vertex).reshape(-1)  # (B * N_human_vertex)
    real_diff = new_human_vertices - new_obj_vertices[rows, closest_point.reshape(-1)].reshape(B, N_human_vertex, 3)  # (B, N_human_vertex, 3)
    contact_loss = torch.sum(contact_flag * torch.abs(torch.sum(real_diff**2, dim=-1)**(0.5) - dist))
    return contact_loss

def naive_joint_contact_loss(person_origin_contact_info, ids, new_human_joints, new_obj_vertices, origin_human_joints):
    """Squared contact loss restricted to the 10 fingertip vertices/joints.

    Args:
        person_origin_contact_info: dict with whole-sequence tensors
            "dist" (T, N) and "closest_point" (T, N).
        ids: frame indices selecting the current batch of B frames.
        new_human_joints: (B, >=71, 3) joints; 61:71 assumed to be the
            10 fingertip joints — TODO confirm against utils.get_joints.
        new_obj_vertices: (B, M, 3) retargeted object vertices.
        origin_human_joints: kept for interface compatibility but unused
            (the original only used it for a commented-out vector loss).

    Returns:
        Scalar tensor: sum of squared (new fingertip-object distance -
        original distance). NOTE(review): contact flags are NOT applied here,
        same as the original implementation.
    """
    dist = person_origin_contact_info["dist"][ids]
    closest_point = person_origin_contact_info["closest_point"][ids]
    B, _ = closest_point.shape

    # Original closest object points / distances for the 10 fingertip vertices.
    tip_ids = list(HAND_VERT_IDS.values())
    hand_vertex_closest = closest_point[:, tip_ids]  # (B, 10)
    hand_vertex_dist = dist[:, tip_ids]  # (B, 10)
    new_obj_contact_vert = new_obj_vertices[torch.arange(B).unsqueeze(1), hand_vertex_closest]  # (B, 10, 3)
    hand_vertex_diff_vec = new_human_joints[:, 61:71, :] - new_obj_contact_vert  # (B, 10, 3)
    hand_vertex_contact_loss = torch.sum((torch.sum(hand_vertex_diff_vec**2, dim=-1)**(0.5) - hand_vertex_dist)**2)
    return hand_vertex_contact_loss

def contact_loss_xrf(person_origin_contact_info, ids, new_joint, new_vert):
    """L1-style fingertip contact loss.

    Compares the current distance between each of the 10 fingertip joints
    (new_joint[:, 61:71]) and its originally-contacted object vertex against
    the stored original distance, weighted by the contact flags.
    """
    # Per-frame contact records for this batch; each is (B, 10).
    target_dist = person_origin_contact_info["dist"][ids]
    target_idx = person_origin_contact_info["closest_point"][ids]
    contact_mask = person_origin_contact_info["contact"][ids]
    batch = target_idx.shape[0]

    # Gather the object vertices each fingertip was touching: (B, 10, 3).
    batch_rows = torch.arange(batch).unsqueeze(1)
    touched = new_vert[batch_rows, target_idx]
    offset = new_joint[:, 61:71, :] - touched  # (B, 10, 3)
    current_dist = torch.sum(offset**2, dim=-1)**(0.5)
    return torch.sum(contact_mask * torch.abs(current_dist - target_dist))

def contact_loss_chamfer(person_origin_contact_info, ids, new_joint, new_vert):
    """Symmetric Chamfer-distance contact loss between the 10 fingertip joints
    and their originally-contacted object vertices.

    * person_origin_contact_info: dict with "contact" and "closest_point";
      assumed to be (B, 10) per batch after indexing by `ids` (so the flag
      broadcast against the chamfer distances lines up) — TODO confirm
    * ids: frame indices for the current batch
    * new_joint: (B, >=71, 3) joints; 61:71 assumed to be the 10 fingertips
    * new_vert: (B, M, 3) object vertices
    """
    # NOTE(review): a fresh ChamferDistance module is built on every call —
    # consider hoisting to module level if this is a hot path.
    chamfer_dist = ChamferDistance()
    flag = person_origin_contact_info["contact"][ids]
    closest_point = person_origin_contact_info["closest_point"][ids]
    B, _ = closest_point.shape
    
    new_joint_pcd = new_joint[:, 61:71, :].float() # B x 10 x 3
    # Gather each fingertip's originally-contacted object vertex: (B, 10, 3).
    new_obj_contact_vert = new_vert[torch.arange(B).unsqueeze(1), closest_point].float()

    # Bidirectional chamfer terms, masked/weighted by the contact flags.
    dist1, dist2 = chamfer_dist(new_joint_pcd, new_obj_contact_vert)
    return torch.mean(flag * dist1) + torch.mean(flag * dist2)



# new_contact_area["person1"] (B, contact_index)
def set_contact_area(person_origin_contact_info, ids, new_contact_area):
    """Overwrite the stored closest-point indices for the given frames, in place.

    Args:
        person_origin_contact_info: dict holding a whole-sequence
            "closest_point" tensor of shape (T, N_human_vertex).
        ids: frame indices to overwrite.
        new_contact_area: (T, N_human_vertex) tensor of replacement
            closest-point indices, indexed by the same absolute frame ids.

    Returns:
        None — mutates person_origin_contact_info["closest_point"] in place.
        (The "contact" and "dist" entries are left untouched; the original
        read them but never used the values.)
    """
    person_origin_contact_info["closest_point"][ids] = new_contact_area[ids]
    

def pointcloud_render(pointcloud, intrinsic, wrapper, index=None):
    """Project a 3D point cloud into a 900x1200 image via the first camera.

    Args:
        pointcloud: (N, 3) array of world-space points.
        wrapper: object exposing `cameras[0].R` and `cameras[0].T` torch
            tensors (pytorch3d-style camera).
        intrinsic: (3, 3) camera intrinsic matrix.
        index: optional iterable of point indices to highlight in blue.

    Returns:
        (900, 1200, 3) float image: white background, projected points red,
        highlighted points blue.
        NOTE(review): pixels are written as image[u, v], i.e. the projected
        x coordinate selects the row — looks transposed vs. the usual
        row=y convention, but kept as-is; confirm against the consumers.
    """
    image = np.full((900, 1200, 3), 255.0)  # white background, float like original

    # Build the world->camera extrinsic from the camera rotation/translation.
    R = wrapper.cameras[0].R.squeeze().detach().cpu().numpy()
    T = wrapper.cameras[0].T.detach().cpu().numpy()
    extrinsic = np.eye(4)
    extrinsic[:3, :3] = R.T
    extrinsic[:3, 3] = T

    # Homogeneous transform into camera space, then perspective projection.
    P = np.concatenate((pointcloud, np.ones((pointcloud.shape[0], 1))), axis=-1)
    P = (P @ np.linalg.inv(extrinsic).T)[:, :3]
    uv = P @ intrinsic.T
    uv = (uv[:, :2] / uv[:, 2:]).astype(np.int32)

    # Vectorized splat (replaces the per-point Python loop, same result):
    # every in-bounds point is painted red.
    ok = (uv[:, 0] >= 0) & (uv[:, 0] < 900) & (uv[:, 1] >= 0) & (uv[:, 1] < 1200)
    image[uv[ok, 0], uv[ok, 1]] = [255, 0, 0]

    # Highlighted subset overwrites red with blue (same order as original).
    if index is not None:
        sel = uv[np.asarray(index, dtype=np.int64)]  # int dtype: safe for empty index
        ok = (sel[:, 0] >= 0) & (sel[:, 0] < 900) & (sel[:, 1] >= 0) & (sel[:, 1] < 1200)
        image[sel[ok, 0], sel[ok, 1]] = [0, 0, 255]

    return image


def find_closest_point(p, vert):
    """Return the index of the point in `vert` nearest to `p`.

    p: a point, shape = (3,)
    vert: a point cloud, shape = (N, 3)
    """
    offsets = vert - p.reshape(1, 3)  # (N, 3)
    distances = ((offsets**2).sum(axis=-1))**(0.5)  # (N,) Euclidean distances
    return distances.argmin()


def HOI_retargeting_given_target_contact(multiperson_SMPLX_params, retarget_obj_vert_seq, new_contact_info, origin_contact_info, start_frame, end_frame, use_pca, num_pca_params, use_new_obj, cfg, epoch=1000, device="cuda:0"):
    # (3) human-only retargeting
    origin_smplx_params = {
        "person1": {
            "betas": multiperson_SMPLX_params["person1"]["betas"].detach().to(device),
            "expression": multiperson_SMPLX_params["person1"]["expression"].detach().to(device),
            "global_orient": multiperson_SMPLX_params["person1"]["global_orient"].detach().to(device),
            "transl": multiperson_SMPLX_params["person1"]["transl"].detach().to(device),
            "body_pose": multiperson_SMPLX_params["person1"]["body_pose"].detach().to(device),
            "left_hand_pose": multiperson_SMPLX_params["person1"]["left_hand_pose"].detach().to(device),
            "right_hand_pose": multiperson_SMPLX_params["person1"]["right_hand_pose"].detach().to(device),
            "joints": multiperson_SMPLX_params["person1"]["joints"].detach().to(device),
        },
        "person2": {
            "betas": multiperson_SMPLX_params["person2"]["betas"].detach().to(device),
            "expression": multiperson_SMPLX_params["person2"]["expression"].detach().to(device),
            "global_orient": multiperson_SMPLX_params["person2"]["global_orient"].detach().to(device),
            "transl": multiperson_SMPLX_params["person2"]["transl"].detach().to(device),
            "body_pose": multiperson_SMPLX_params["person2"]["body_pose"].detach().to(device),
            "left_hand_pose": multiperson_SMPLX_params["person2"]["left_hand_pose"].detach().to(device),
            "right_hand_pose": multiperson_SMPLX_params["person2"]["right_hand_pose"].detach().to(device),
            "joints": multiperson_SMPLX_params["person2"]["joints"].detach().to(device),
        }
    }
    init_smplx_params = None  # 第一个batch用origin_smplx_params初始化
    
    # parameters in local/global reality loss
    # strong_relative_joints = np.int32([15, 16, 17, 18, 19, 20, 21])  # 在joints里的编号(0-127, 0是root)
    strong_relative_joints = np.int32([15, 16, 17, 20, 21])  # 在joints里的编号(0-127, 0是root)
    arm_joints = np.int32([18, 19])
    # weak_relative_joints = [i for i in range(0, 127) if i-1 not in strong_relative_joint]
    weak_relative_joints = np.int32([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])  # 在joints里的编号(0-127, 0是root)
    feet_joints = np.int32([7, 8, 10, 11, 60, 61, 62, 63, 64, 65])
    # feet_joints = np.int32([7, 8, 10, 11, 55, 56, 57, 58, 59, 60])
    left_foot_joints = np.int32([7, 10, 60, 61, 62])
    right_foot_joints = np.int32([8, 11, 63, 64, 65])
    # left_foot_joints = np.int32([7, 10, 55, 56, 57])
    # right_foot_joints = np.int32([8, 11, 58, 59, 60])
    left_feet_ground_joints = np.int32([60, 61, 62])
    # left_feet_ground_joints = np.int32([57])
    right_feet_ground_joints = np.int32([63, 64, 65])
    # right_feet_ground_joints = np.int32([60])
    feet_ground_joints = np.int32([62, 65])
    head_joint = np.int32([15])
    hand_joints= np.int32([20, 21])
    lambda_regularizer, lambda_smoothness, lambda_reality, lambda_contact, lambda_v, lambda_feet, lambda_arm_loss, lambda_hta = 1e-1, 1, [1e-2, 1e-1, 3e-2], 1000.0, 1.0, 1, 1e-3, 1e-2
    # lambda_regularizer, lambda_smoothness, lambda_reality, lambda_contact, lambda_v, lambda_feet, lambda_arm_loss, lambda_hta = 0.0, 10.0, [0.0, 0.0, 0.0], 1.0, 0.0, 0.0, 0.0, 0.0
    learning_rate = 1e-2

    print("strong_relative_joints =", strong_relative_joints)
    print("weak_relative_joints =", weak_relative_joints)
    print("feet_joints =", feet_joints)
    print("lambda_regularizer =", lambda_regularizer)
    print("lambda_smoothness =", lambda_smoothness)
    print("lambda_reality =", lambda_reality)
    print("lambda_contact =", lambda_contact)
    print("lambda_v =", lambda_v)
    print("lambda_feet =", lambda_feet)
    print("lambda_arm_loss =", lambda_arm_loss)
    print("learning_rate =", learning_rate)
    
    M = end_frame - start_frame  # batch size
    # M = 100
    # M = 50
    # N_epoch = 20000
    N_epoch = epoch
    result_smplx_params = []
    result_params = None
    for i in range(start_frame, end_frame, M):
        ids = np.arange(i, min(i+M, end_frame))
        L = ids.shape[0]
        print("------------ optimizing frame {} - {} -----------------------".format(str(i), str(i+L-1)))
        # initialization
        sequence_init_smplx_params = {}
        if i == start_frame:  # the first batch
            for person in origin_smplx_params:
                sequence_init_smplx_params[person] = {
                    "betas": origin_smplx_params[person]["betas"][ids - start_frame, :].clone().detach(),
                    "expression": origin_smplx_params[person]["expression"][ids - start_frame, :].clone().detach(),
                    "global_orient": origin_smplx_params[person]["global_orient"][ids - start_frame, :].clone().detach(),
                    "transl": origin_smplx_params[person]["transl"][ids - start_frame, :].clone().detach(),
                    "body_pose": origin_smplx_params[person]["body_pose"][ids - start_frame, :].clone().detach(),
                    "left_hand_pose": origin_smplx_params[person]["left_hand_pose"][ids - start_frame, :].clone().detach(),
                    "right_hand_pose": origin_smplx_params[person]["right_hand_pose"][ids - start_frame, :].clone().detach(),
                }
        else:  # not the first batch
            for person in init_smplx_params:
                sequence_init_smplx_params[person] = {}
                for key in init_smplx_params[person]:
                    sequence_init_smplx_params[person][key] = init_smplx_params[person][key].expand((L,) + init_smplx_params[person][key].shape[1:]).clone()

        # speed-up
        sequence_origin_smplx_params = {}
        for person in ["person1", "person2"]:
            sequence_origin_smplx_params[person] = {}
            for key in origin_smplx_params[person]:
                sequence_origin_smplx_params[person][key] = origin_smplx_params[person][key][ids - start_frame].clone().detach()

        smplx_model = create_SMPLX_model(use_pca=use_pca, num_pca_comps=num_pca_params, batch_size=L, device=device)
        optim_model = SMPLX_HH(smplx_model, sequence_init_smplx_params, cfg, device=device)
        optim_model.to(device)
        optimizer = torch.optim.Adam(optim_model.parameters(), lr=learning_rate)
        optim_model.train()

        # TODO batch smoothness loss
        loss_rec = []

        epoch_start_time = time.perf_counter()

        # p1_left_if_onground = torch.ge(origin_smplx_params["person1"]["joints"][ids-start_frame][:, left_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        # p1_right_if_onground = torch.ge(origin_smplx_params["person1"]["joints"][ids-start_frame][:, right_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        # p2_left_if_onground = torch.ge(origin_smplx_params["person2"]["joints"][ids-start_frame][:, left_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        # p2_right_if_onground = torch.ge(origin_smplx_params["person2"]["joints"][ids-start_frame][:, right_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        # x_z = torch.tensor([0, 2]).to(device)

        result_params = None

        for epoch in range(N_epoch):
            optimizer.zero_grad()
            results = optim_model()
            
            joints_p1, body_pose_p1, left_hand_pose_p1, right_hand_pose_p1, transl_p1, global_orient_p1 = results["person1"]["joints"], results["person1"]["body_pose"], results["person1"]["left_hand_pose"], results["person1"]["right_hand_pose"], results["person1"]["transl"], results["person1"]["global_orient"]
            joints_p2, body_pose_p2, left_hand_pose_p2, right_hand_pose_p2, transl_p2, global_orient_p2 = results["person2"]["joints"], results["person2"]["body_pose"], results["person2"]["left_hand_pose"], results["person2"]["right_hand_pose"], results["person2"]["transl"], results["person2"]["global_orient"]
            
            regularizer_p1 = torch.sum(body_pose_p1**2) * 1e-3 + torch.sum(left_hand_pose_p1**2) * 1e-4 + torch.sum(right_hand_pose_p1**2) * 1e-4
            regularizer_p2 = torch.sum(body_pose_p2**2) * 1e-3 + torch.sum(left_hand_pose_p2**2) * 1e-4 + torch.sum(right_hand_pose_p2**2) * 1e-4
            
            # acceleration
            smoothness_p1 = (torch.sum((2 * body_pose_p1[1:-1] - body_pose_p1[:-2] - body_pose_p1[2:])**2) + torch.sum((2 * left_hand_pose_p1[1:-1] - left_hand_pose_p1[:-2] - left_hand_pose_p1[2:])**2) + torch.sum((2 * right_hand_pose_p1[1:-1] - right_hand_pose_p1[:-2] - right_hand_pose_p1[2:])**2)) + torch.sum((2 * transl_p1[1:-1] - transl_p1[:-2] - transl_p1[2:])**2) + torch.sum((2 * global_orient_p1[1:-1] - global_orient_p1[:-2] - global_orient_p1[2:])**2)
            smoothness_p2 = (torch.sum((2 * body_pose_p2[1:-1] - body_pose_p2[:-2] - body_pose_p2[2:])**2) + torch.sum((2 * left_hand_pose_p2[1:-1] - left_hand_pose_p2[:-2] - left_hand_pose_p2[2:])**2) + torch.sum((2 * right_hand_pose_p2[1:-1] - right_hand_pose_p2[:-2] - right_hand_pose_p2[2:])**2)) + torch.sum((2 * transl_p2[1:-1] - transl_p2[:-2] - transl_p2[2:])**2) + torch.sum((2 * global_orient_p2[1:-1] - global_orient_p2[:-2] - global_orient_p2[2:])**2)
            
            if (i > start_frame) and (L >= 2):
                smoothness_to_init_p1 = (torch.sum((2 * body_pose_p1[0] - result_smplx_params[-1]["person1"]["body_pose"][0].detach() - body_pose_p1[1])**2) + torch.sum((2 * left_hand_pose_p1[0] - result_smplx_params[-1]["person1"]["left_hand_pose"][0].detach() - left_hand_pose_p1[1])**2) + torch.sum((2 * right_hand_pose_p1[0] - result_smplx_params[-1]["person1"]["right_hand_pose"][0].detach() - right_hand_pose_p1[1])**2))  # acceleration
                smoothness_to_init_p2 = (torch.sum((2 * body_pose_p2[0] - result_smplx_params[-1]["person2"]["body_pose"][0].detach() - body_pose_p2[1])**2) + torch.sum((2 * left_hand_pose_p2[0] - result_smplx_params[-1]["person2"]["left_hand_pose"][0].detach() - left_hand_pose_p2[1])**2) + torch.sum((2 * right_hand_pose_p2[0] - result_smplx_params[-1]["person2"]["right_hand_pose"][0].detach() - right_hand_pose_p2[1])**2))  # acceleration
            else:
                smoothness_to_init_p1 = 0.0
                smoothness_to_init_p2 = 0.0
            
            # L2 loss
            local_reality_loss_p1 = torch.sum((body_pose_p1[:, strong_relative_joints - 1] - sequence_origin_smplx_params["person1"]["body_pose"][:, strong_relative_joints - 1])**2) + torch.sum((left_hand_pose_p1 - sequence_origin_smplx_params["person1"]["left_hand_pose"])**2) + torch.sum((right_hand_pose_p1 - sequence_origin_smplx_params["person1"]["right_hand_pose"])**2)
            local_reality_loss_p2 = torch.sum((body_pose_p2[:, strong_relative_joints - 1] - sequence_origin_smplx_params["person2"]["body_pose"][:, strong_relative_joints - 1])**2) + torch.sum((left_hand_pose_p2 - sequence_origin_smplx_params["person2"]["left_hand_pose"])**2) + torch.sum((right_hand_pose_p2 - sequence_origin_smplx_params["person2"]["right_hand_pose"])**2)
            arm_loss_p1 = torch.sum((body_pose_p1[:, arm_joints] - sequence_origin_smplx_params["person1"]["body_pose"][:, arm_joints])**2)
            arm_loss_p2 = torch.sum((body_pose_p2[:, arm_joints] - sequence_origin_smplx_params["person2"]["body_pose"][:, arm_joints])**2)
            local_rotation_reality_loss = local_reality_loss_p1 + local_reality_loss_p2
            arm_loss = arm_loss_p1 + arm_loss_p2
           
            # 胳膊上的关节相对胸口关节的平移向量的loss
            strong_relative_vector_p1 = joints_p1[:, strong_relative_joints] - joints_p1[:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            gt_relative_vector_p1 = sequence_origin_smplx_params["person1"]["joints"][:, strong_relative_joints] - sequence_origin_smplx_params["person1"]["joints"][:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            local_relative_reality_loss_p1 = torch.sum((strong_relative_vector_p1 - gt_relative_vector_p1)**2)
            strong_relative_vector_p2 = joints_p2[:, strong_relative_joints] - joints_p2[:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            gt_relative_vector_p2 = sequence_origin_smplx_params["person2"]["joints"][:, strong_relative_joints] - sequence_origin_smplx_params["person2"]["joints"][:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            local_relative_reality_loss_p2 = torch.sum((strong_relative_vector_p2 - gt_relative_vector_p2)**2)
            local_relative_reality_loss = local_relative_reality_loss_p1 + local_relative_reality_loss_p2
            
            weak_relative_vector_p1 = joints_p1[:, weak_relative_joints] - joints_p1[:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            gt_relative_vector_p1 = sequence_origin_smplx_params["person1"]["joints"][:, weak_relative_joints] - sequence_origin_smplx_params["person1"]["joints"][:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            global_relative_reality_loss_p1 = torch.sum((weak_relative_vector_p1 - gt_relative_vector_p1)**2)
            weak_relative_vector_p2 = joints_p2[:, weak_relative_joints] - joints_p2[:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            gt_relative_vector_p2 = sequence_origin_smplx_params["person2"]["joints"][:, weak_relative_joints] - sequence_origin_smplx_params["person2"]["joints"][:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            global_relative_reality_loss_p2 = torch.sum((weak_relative_vector_p2 - gt_relative_vector_p2)**2)
            
            global_relative_reality_loss = global_relative_reality_loss_p1 + global_relative_reality_loss_p2
            
            # left_arm_joints = [18, 20]
            # right_arm_joints = [19, 17]
            # test_vec_p1_left = joints_p1[:, left_arm_joints] - joints_p1[:, 16].unsqueeze(1).repeat(1, len(left_arm_joints), 1)
            # gt_vec_p1_left = origin_smplx_params["person1"]["joints"][ids-start_frame][:, left_arm_joints] - origin_smplx_params["person1"]["joints"][ids-start_frame][:, 16].unsqueeze(1).repeat(1, len(left_arm_joints), 1)
            # test_vec_p2_left = joints_p2[:, left_arm_joints] - joints_p2[:, 16].unsqueeze(1).repeat(1, len(left_arm_joints), 1)
            # gt_vec_p2_left = origin_smplx_params["person2"]["joints"][ids-start_frame][:, left_arm_joints] - origin_smplx_params["person2"]["joints"][ids-start_frame][:, 16].unsqueeze(1).repeat(1, len(left_arm_joints), 1)
            # test_vec_p1_right = joints_p1[:, right_arm_joints] - joints_p1[:, 17].unsqueeze(1).repeat(1, len(right_arm_joints), 1)
            # gt_vec_p1_right = origin_smplx_params["person1"]["joints"][ids-start_frame][:, right_arm_joints] - origin_smplx_params["person1"]["joints"][ids-start_frame][:, 17].unsqueeze(1).repeat(1, len(right_arm_joints), 1)
            # test_vec_p2_right = joints_p2[:, right_arm_joints] - joints_p2[:, 17].unsqueeze(1).repeat(1, len(right_arm_joints), 1)
            # gt_vec_p2_right = origin_smplx_params["person2"]["joints"][ids-start_frame][:, right_arm_joints] - origin_smplx_params["person2"]["joints"][ids-start_frame][:, 17].unsqueeze(1).repeat(1, len(right_arm_joints), 1)
            # test_loss_p1 = torch.sum((test_vec_p1_left - gt_vec_p1_left)**2) + 
            # test_loss_p2 = torch.sum((test_vec_p2_left - gt_vec_p2_left)**2)
            
            # xrf 0927 希望加一个以contact为中心的矫正头部的loss 模拟以contact area为中心
            gt_vector_hta_p1 = sequence_origin_smplx_params["person1"]["joints"][:, head_joint].repeat(1, len(hand_joints), 1) - sequence_origin_smplx_params["person1"]["joints"][:, hand_joints]
            hta_p1 = joints_p1[:, head_joint].repeat(1, len(hand_joints), 1) - joints_p1[:, hand_joints]
            hta_loss_p1 = torch.sum((hta_p1 - gt_vector_hta_p1)**2)
            gt_vector_hta_p2 = sequence_origin_smplx_params["person2"]["joints"][:, head_joint].repeat(1, len(hand_joints), 1) - sequence_origin_smplx_params["person2"]["joints"][:, hand_joints]
            hta_p2 = joints_p2[:, head_joint].repeat(1, len(hand_joints), 1) - joints_p2[:, hand_joints]
            hta_loss_p2 = torch.sum((hta_p2 - gt_vector_hta_p2)**2)
            hta_loss = hta_loss_p1 + hta_loss_p2
            # hta_loss = torch.tensor(0.0)

            v_loss_p1 = torch.sum(torch.abs(transl_p1[1:] - transl_p1[:-1] - (sequence_origin_smplx_params["person1"]["transl"][1:] - sequence_origin_smplx_params["person1"]["transl"][:-1])))
            v_loss_p2 = torch.sum(torch.abs(transl_p2[1:] - transl_p2[:-1] - (sequence_origin_smplx_params["person2"]["transl"][1:] - sequence_origin_smplx_params["person2"]["transl"][:-1])))
            if (i > start_frame) and (L >= 2):
                v_loss_to_init_p1 = torch.sum(torch.abs(transl_p1[0] - result_smplx_params[-1]["person1"]["transl"][0] - (sequence_origin_smplx_params["person1"]["transl"][0] - origin_smplx_params["person1"]["transl"][i-start_frame-1].detach()))) * (M / 5)
                v_loss_to_init_p2 = torch.sum(torch.abs(transl_p2[0] - result_smplx_params[-1]["person2"]["transl"][0] - (sequence_origin_smplx_params["person2"]["transl"][0] - origin_smplx_params["person2"]["transl"][i-start_frame-1].detach()))) * (M / 5)
            else:
                v_loss_to_init_p1 = 0.0
                v_loss_to_init_p2 = 0.0
            
            # feet loss
            # 判断脚是否在地上 如果在地上就对所有的脚部关节的速度进行惩罚
            feet_loss_p1 = torch.sum((joints_p1[:, feet_ground_joints - 5, 1] - sequence_origin_smplx_params["person1"]["joints"][:, feet_ground_joints, 1])**2)
            feet_loss_p2 = torch.sum((joints_p2[:, feet_ground_joints - 5, 1] - sequence_origin_smplx_params["person2"]["joints"][:, feet_ground_joints, 1])**2)

            # liuyun:
            # new obj vertices
            new_obj_vertices = retarget_obj_vert_seq[ids-start_frame]  # (L, N_obj, 3)

            # xrf 
            # 已经有了手部的vertex idx，我们可以得到closest point的dist 然后joints对在retargeting之后obj上的同样的点计算dist loss
            # contact_p1_loss = naive_joint_contact_loss(new_contact_info["person1"], ids-start_frame, joints_p1, new_obj_vertices, origin_smplx_params["person1"]["joints"])  # person 1 contact loss
            # contact_p2_loss = naive_joint_contact_loss(new_contact_info["person2"], ids-start_frame, joints_p2, new_obj_vertices, origin_smplx_params["person2"]["joints"])
            # print(new_contact_info["person1"]["dist"][ids-start_frame].shape, new_contact_info["person1"]["closest_point"][ids-start_frame].shape)
            # closest_point = new_contact_info["person1"]["closest_point"][ids-start_frame]
            if use_new_obj:
                # contact_p1_loss = contact_loss_xrf(new_contact_info["person1"], ids-start_frame, joints_p1, new_obj_vertices)  # person 1 contact loss
                # contact_p2_loss = contact_loss_xrf(new_contact_info["person2"], ids-start_frame, joints_p2, new_obj_vertices)
                contact_p1_loss = contact_loss_chamfer(new_contact_info["person1"], ids-start_frame, joints_p1, new_obj_vertices)
                contact_p2_loss = contact_loss_chamfer(new_contact_info["person2"], ids-start_frame, joints_p2, new_obj_vertices)
            else:
                contact_p1_loss = naive_joint_contact_loss(origin_contact_info["person1"], ids-start_frame, joints_p1, new_obj_vertices, origin_smplx_params["person1"]["joints"])  # person 1 contact loss
                contact_p2_loss = naive_joint_contact_loss(origin_contact_info["person2"], ids-start_frame, joints_p2, new_obj_vertices, origin_smplx_params["person2"]["joints"])
            regularizer = regularizer_p1 + regularizer_p2
            smoothness = smoothness_p1 + smoothness_p2  + smoothness_to_init_p1 + smoothness_to_init_p2
            contact_loss = contact_p1_loss + contact_p2_loss
            # contact_loss = torch.tensor(0.0)
            v_loss = v_loss_p1 + v_loss_p2 + v_loss_to_init_p1 + v_loss_to_init_p2
            feet_loss = feet_loss_p1 + feet_loss_p2
            # feet_loss = feet_v_loss_p1 + feet_v_loss_p2 + feet_loss_p1 + feet_loss_p2

            loss = lambda_regularizer * regularizer + lambda_smoothness * smoothness \
                + lambda_reality[0] * local_rotation_reality_loss \
                + lambda_reality[1] * local_relative_reality_loss + lambda_reality[2] * global_relative_reality_loss \
                + lambda_contact * contact_loss + lambda_v * v_loss + lambda_feet * feet_loss + lambda_arm_loss * arm_loss + lambda_hta * hta_loss
            if epoch % 100 == 0 or (epoch == N_epoch - 1):
                print(epoch, loss.item(), lambda_regularizer * regularizer.item(), lambda_smoothness * smoothness.item(), \
                    lambda_reality[0] * local_rotation_reality_loss.item(), \
                    lambda_reality[1] * local_relative_reality_loss.item(), lambda_reality[2] * global_relative_reality_loss.item(), \
                    lambda_contact * contact_loss.item(), lambda_v * v_loss.item(), lambda_feet * feet_loss.item(), lambda_arm_loss * arm_loss.item(), lambda_hta * hta_loss.item())
            loss_rec.append(loss.item())
            loss.backward()
            optimizer.step()
            if loss.item() < 0.5 and epoch > 1000:
                print("loss < 0.5, break")
                break
        
        optim_model.eval()
        results = optim_model()
        epoch_end_time = time.perf_counter()
        print("time: {}s".format(str(epoch_end_time - epoch_start_time)))

        for j in range(L):
            result_smplx_params.append({
                "person1": {
                    "betas": results["person1"]["betas"][j:j+1].detach(),
                    "expression": results["person1"]["expression"][j:j+1].detach(),
                    "global_orient": results["person1"]["global_orient"][j:j+1].detach(),
                    "transl": results["person1"]["transl"][j:j+1].detach(),
                    "body_pose": results["person1"]["body_pose"][j:j+1].detach(),
                    "left_hand_pose": results["person1"]["left_hand_pose"][j:j+1].detach(),
                    "right_hand_pose": results["person1"]["right_hand_pose"][j:j+1].detach(),
                },
                "person2": {
                    "betas": results["person2"]["betas"][j:j+1].detach(),
                    "expression": results["person2"]["expression"][j:j+1].detach(),
                    "global_orient": results["person2"]["global_orient"][j:j+1].detach(),
                    "transl": results["person2"]["transl"][j:j+1].detach(),
                    "body_pose": results["person2"]["body_pose"][j:j+1].detach(),
                    "left_hand_pose": results["person2"]["left_hand_pose"][j:j+1].detach(),
                    "right_hand_pose": results["person2"]["right_hand_pose"][j:j+1].detach(),
                },
            })
        
        init_smplx_params = {
            "person1": {
                "betas": results["person1"]["betas"][-1:].detach(),
                "expression": results["person1"]["expression"][-1:].detach(),
                "global_orient": results["person1"]["global_orient"][-1:].detach(),
                "transl": results["person1"]["transl"][-1:].detach(),
                "body_pose": results["person1"]["body_pose"][-1:].detach(),
                "left_hand_pose": results["person1"]["left_hand_pose"][-1:].detach(),
                "right_hand_pose": results["person1"]["right_hand_pose"][-1:].detach(),
            },
            "person2": {
                "betas": results["person2"]["betas"][-1:].detach(),
                "expression": results["person2"]["expression"][-1:].detach(),
                "global_orient": results["person2"]["global_orient"][-1:].detach(),
                "transl": results["person2"]["transl"][-1:].detach(),
                "body_pose": results["person2"]["body_pose"][-1:].detach(),
                "left_hand_pose": results["person2"]["left_hand_pose"][-1:].detach(),
                "right_hand_pose": results["person2"]["right_hand_pose"][-1:].detach(),
            }
        }
    
    return result_smplx_params
    

def HOI_retargeting(data_dir, save_dir, origin_mesh, ori_obj_name, obj_mesh, obj_poses, start_frame, end_frame, cfg, device="cuda:0", use_new_obj=False, new_obj_name=None, contact_vis=False, human_pose_save_path=None, object_pose_save_path=None, object_mesh_save_path=None, obj_dataset_dir=None, contact_pool_path=None):
    """
    Human-object-interaction retargeting (outer loop).

    At this stage only the human poses are optimized; the (already retargeted)
    object trajectory is taken as given.

    Pipeline:
      (1) load GT multi-person SMPL-X params and build per-frame object vertices;
      (2) compute per-frame contact info (contact flag / distance / closest object
          point) between each person and the original object;
      (3) if ``use_new_obj``, transfer the contact areas onto the new object via
          Tink (DeepSDF-based correspondence);
      (4) build K candidate target-contact assignments using a quasi-static
          assumption plus random spatial perturbation of the contact points;
      (5) run the inner optimization (``HOI_retargeting_given_target_contact``)
          for every candidate, score each result with a pretrained RewardModel,
          and finally re-optimize over the full frame range with the best contact.

    Returns:
        Per-frame SMPL-X parameter dicts ("person1"/"person2" sub-dicts)
        optimized under the best-scoring contact target.

    NOTE(review): the ``cfg`` argument is overwritten below by a hard-coded
    config path; several absolute paths make this function machine-specific.
    NOTE(review): if ``use_new_obj`` is False, ``new_contact_info`` is never
    defined and the code after the contact-transfer section will raise a
    NameError — confirm this path is never taken.
    """
    assert not new_obj_name is None
    
    start_time = time.perf_counter()
    os.makedirs(save_dir, exist_ok=True)
    # SMPL-X hand pose parameterization: PCA with 12 components per hand.
    use_pca, num_pca_params = True, 12

    # (1) load gt human poses and simplify obj_mesh
    try:
        multiperson_SMPLX_params = load_multiperson_smplx_params(join(data_dir, "SMPLX_fitting"), start_frame=start_frame, end_frame=end_frame, device=device)
    except Exception as e:
        raise e

    print(multiperson_SMPLX_params["person1"]["body_pose"].shape)
    print(multiperson_SMPLX_params.keys(), multiperson_SMPLX_params["person2"].keys(), multiperson_SMPLX_params["person2"]["body_pose"].shape, multiperson_SMPLX_params["person1"]["joints"].shape)

    count_time = time.perf_counter()
    print("time for loading data and simplifying meshes: {}s".format(str(count_time - start_time)))
    
    # (2) compute per-frame HOH meshes and compute contact

    if use_new_obj:
        # For a new object we work on a sampled point cloud of its mesh.
        target_pcd = to_pointcloud(obj_mesh)
        vert = np.asarray(target_pcd.points)
    else:
        vert, face = obj_mesh.vertices, obj_mesh.faces

    gt_obj_rot_vec = []
    gt_obj_trans_vec = []
    # Read the object's per-frame rotation (axis-angle) and translation,
    # then compute the posed vertices for every frame.
    for i in range(start_frame, end_frame):
        gt_obj_rot_vec.append(obj_poses[i]["rotation"].cpu().numpy())
        gt_obj_trans_vec.append(obj_poses[i]["translation"].cpu().numpy())
    gt_obj_rot_vec = torch.tensor(gt_obj_rot_vec, dtype=torch.float64).to(device).squeeze(1)
    print(gt_obj_rot_vec.shape)
    # axis-angle -> rotation matrices, shape (N_frame, 3, 3)
    gt_obj_rot_mat = batch_rodrigues(gt_obj_rot_vec).cpu().numpy()

    gt_obj_trans_vec = np.array(gt_obj_trans_vec)
    retarget_obj_vert_seq = (np.array(gt_obj_rot_mat @ vert.T)).transpose(0, 2, 1) + gt_obj_trans_vec
    retarget_obj_vert_seq = torch.from_numpy(retarget_obj_vert_seq).to(device)  # (N_frame, N_obj_verts, 3)

    origin_vert, origin_face = origin_mesh.vertices, origin_mesh.faces
    object_dir = join(data_dir, 'aligned_objposes.npy')
    origin_pose = np.load(object_dir, allow_pickle=True)[start_frame:end_frame]
    # Pose the original object's canonical vertices with the per-frame
    # rigid transforms stored in aligned_objposes.npy.
    origin_vert_seq = (np.array(origin_pose[:, :3, :3] @ origin_vert.T)).transpose(0, 2, 1) + np.expand_dims(origin_pose[:, :3, 3], axis=1)
    origin_vert_seq = torch.from_numpy(origin_vert_seq).to(device)  # (N_frame, N_obj_verts, 3)
    
    ######################################################################
    # preprocess contact areas 10675 x 3
    print("start preparing contact areas ...")
    origin_contact_info = {
        "person1": {"contact": [], "dist": [], "closest_point": []},
        "person2": {"contact": [], "dist": [], "closest_point": []},
    }
    # distance threshold (meters) below which a vertex counts as "in contact"
    contact_threshould=0.05
    # contact_threshould = 0.01
    smplx_model = create_SMPLX_model(use_pca=use_pca, num_pca_comps=num_pca_params, batch_size=1, device=device)

    # Contact of each person against the *original* object, frame by frame
    # (idx is relative to start_frame).
    for idx in range(0, end_frame - start_frame):
        # person1 to original obj
        
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person1"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshould, device=device)
        origin_contact_info["person1"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person1"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person1"]["closest_point"].append(closest_point.detach().cpu().numpy())
        
        # person2 to original obj
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person2"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshould, device=device)
        origin_contact_info["person2"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person2"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person2"]["closest_point"].append(closest_point.detach().cpu().numpy())
        

    # Stack per-frame lists into tensors on the target device.
    for person in origin_contact_info:
        origin_contact_info[person]["contact"] = torch.tensor(origin_contact_info[person]["contact"], dtype=torch.bool).to(device)
        origin_contact_info[person]["dist"] = torch.tensor(origin_contact_info[person]["dist"], dtype=torch.float32).to(device)
        origin_contact_info[person]["closest_point"] = torch.tensor(origin_contact_info[person]["closest_point"], dtype=torch.int64).to(device)
    print("finish preparing contact areas !!!")

    print(origin_contact_info["person1"]["contact"].shape, origin_contact_info["person1"]["dist"].shape, origin_contact_info["person1"]["closest_point"].shape)
    
    # contact pool
    # if contact_pool_path is not None:
    #     contact_pool = np.load(contact_pool_path, allow_pickle=True).item()
    #     target_contact_pool = contact_pool.copy()
    #     for person in contact_pool:
    #         for hand_name in contact_pool[person]:

    #     person1_left, person1_right = contact_pool["person1"]["left_hand"], contact_pool["person1"]["right_hand"]
    #     person2_left, person2_right = contact_pool["person2"]["left_hand"], contact_pool["person2"]["right_hand"]
    #     _, target_contact_pool = tranfer_contact_to_new_obj("/home/liuyun/HHO-dataset/data_processing/Tink/DeepSDF_OakInk/data/sdf/", map_obj_to_sdf(ori_obj_name)["id"], new_obj_name, origin_mesh, map_obj_to_sdf(ori_obj_name)["type"], source_contact, person, visualize=True, vis_save_file=join(save_dir, new_obj_name + "_contact"))
    ###########################################################################################################################
    # liuyun: visualize original contact areas (for reference motion)
    
    # intrinsic = np.array([[-600, 0, 640], [0, -600, 360], [0, 0, 1]])
    # pyt3d_wrapper_view1 = Pyt3DWrapper(image_size=(1200, 900), use_fixed_cameras=False, eyes=[np.float32([0.0, -4.0, 4.0])], intrin=intrinsic, device="cuda:0")
    
    # hand_vertex_indices: SMPL-X vertex ids of the 10 fingertips (5 per hand).
    hand_vertex_indices = list(HAND_VERT_IDS.values())
    assert len(hand_vertex_indices) == 10
    
    # NOTE(review): `intrinsic` and `pyt3d_wrapper_view1` are commented out
    # above — if they are not defined at module level, contact_vis=True raises
    # a NameError here; confirm before enabling visualization.
    if contact_vis:
        for person in origin_contact_info:
            contact_o = origin_contact_info[person]["closest_point"]
            
            origin_pcd = o3d.geometry.PointCloud()
            origin_pcd.points = o3d.utility.Vector3dVector(origin_vert)
            pcd_tree = o3d.geometry.KDTreeFlann(origin_pcd)
            # origin_vert: origin_mesh.vertices (vertices of the GT motion's object mesh)
            # Hard-coded view transform used only for rendering the point cloud.
            draw_vert = (origin_vert @ np.array([[-0.01238667,-0.02917804,0.99949747],[-0.02601937,0.9992451,0.02884821],[-0.99958469,-0.02564896,-0.01313651]]).T+np.array([[-0.48912618,0.3995658,-0.29797912]])) @ np.array([[0, -1, 0], [-0.966, 0, 0.26], [-0.26, 0, -0.966]]) + np.array([[1.0, 1.0, 1.0]])
            imgs_original_contact_visualization = []
            
            # NOTE(review): contact_o has (end_frame - start_frame) rows but is
            # indexed with absolute frame ids — only correct when
            # start_frame == 0; TODO confirm.
            for idx in range(start_frame, end_frame):
                all_index = []
                for i in range(10):
                    # Highlight a 2cm-radius neighborhood around each fingertip's closest object point.
                    [k, index, _] = pcd_tree.search_radius_vector_3d(origin_pcd.points[contact_o[idx][hand_vertex_indices[i]]], 0.02)
                    all_index.extend(index)
                img1 = pointcloud_render(draw_vert, intrinsic, pyt3d_wrapper_view1, index=all_index)
                img1 = img1[450:, 400:1000, :]  # hard-coded crop of the render
                img1 = cv2.resize(img1, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
                imgs_original_contact_visualization.append(img1.astype(np.uint8))
                
            print("save video ...")
            fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            videoWriter = cv2.VideoWriter(join(data_dir, f"{person}_gt_contact.mp4"), fourcc, 10, (1200, 900))
            for frame in imgs_original_contact_visualization:
                videoWriter.write(frame)
            videoWriter.release()
            print(f"done visualizing gt contact areas to {join(data_dir, f'{person}_gt_contact.mp4')} !!!")
    
    ###########################################################################################################################
    
    # (3) transfer contact areas from the original object onto the new object.
    if use_new_obj:
        target_pcd = to_pointcloud(obj_mesh)
        print("transfering contact areas ...")

        contact_points = {
            "person1": {"dist": [], "closest_point": [], "contact": []},
            "person2": {"dist": [], "closest_point": [], "contact": []},
        }

        # Keep only the 10 fingertip rows of the dense contact info (N x 10).
        for person in contact_points:
            contact_points[person]["dist"] = origin_contact_info[person]["dist"][:, list(HAND_VERT_IDS.values())].detach().cpu().numpy() # N x 10
            contact_points[person]["closest_point"] = origin_contact_info[person]["closest_point"][:, list(HAND_VERT_IDS.values())].detach().cpu().numpy() # N x 10
            contact_points[person]["contact"] = origin_contact_info[person]["contact"][:, list(HAND_VERT_IDS.values())].detach().cpu().numpy()

        # Unique original-object vertex ids ever touched by each person.
        contact_area = {
            "person1": [],
            "person2": [],
        }
        for person in contact_area:
            contact_area[person] = np.unique(contact_points[person]["closest_point"])

        # NOTE(review): shallow copy — the per-person inner dicts are shared
        # with contact_points; the code below replaces whole entries, so this
        # happens to work, but mutations propagate both ways.
        new_contact_info = contact_points.copy()
        for person in origin_contact_info:
            source_contact = contact_area[person]
            # input: source contact, list, output: source contact, list; target contact, list
            os.makedirs(join(save_dir, new_obj_name + "_contact"), exist_ok = True)
            _, target_contact = tranfer_contact_to_new_obj("/home/liuyun/HHO-dataset/data_processing/Tink/DeepSDF_OakInk/data/sdf/", map_obj_to_sdf(ori_obj_name)["id"], new_obj_name, origin_mesh, map_obj_to_sdf(ori_obj_name)["type"], source_contact, person, visualize=True, vis_save_file=join(save_dir, new_obj_name + "_contact"))
            # TODO: handle points that are too far away

            # Remap every closest-point id from the original object to the new one.
            contact_mapping = dict(zip(source_contact, target_contact))
            new_contact_info[person]["closest_point"] = np.array([contact_mapping[i] for i in new_contact_info[person]["closest_point"].reshape(-1)]).reshape(new_contact_info[person]["closest_point"].shape)

        # intrinsic = np.array([[-600, 0, 640], [0, -600, 360], [0, 0, 1]])
        # pyt3d_wrapper_view1 = Pyt3DWrapper(image_size=(1200, 900), use_fixed_cameras=False, eyes=[np.float32([0.0, -4.0, 4.0])], intrin=intrinsic, device="cuda:0")
        # NOTE(review): same `intrinsic`/`pyt3d_wrapper_view1` availability and
        # absolute-frame-index concerns as the GT visualization above.
        if contact_vis:
            for person in new_contact_info:
                contact_t = new_contact_info[person]["closest_point"]

                pcd_tree = o3d.geometry.KDTreeFlann(target_pcd)
                # Same hard-coded render transform as above, applied to the new object.
                draw_vert = (vert @ np.array([[-0.01238667,-0.02917804,0.99949747],[-0.02601937,0.9992451,0.02884821],[-0.99958469,-0.02564896,-0.01313651]]).T+np.array([[-0.48912618,0.3995658,-0.29797912]])) @ np.array([[0, -1, 0], [-0.966, 0, 0.26], [-0.26, 0, -0.966]]) + np.array([[1.0, 1.0, 1.0]])
                video_v1 = []
                for idx in range(start_frame, end_frame):
                    all_index = []
                    for i in range(10):
                        if not new_contact_info[person]["contact"][idx][i]:  # for visualization, distance < 5cm counts as contact
                            continue
                        [k, index, _] = pcd_tree.search_radius_vector_3d(target_pcd.points[contact_t[idx][i]], 0.05)
                        all_index.extend(index)
                    img1 = pointcloud_render(draw_vert, intrinsic, pyt3d_wrapper_view1, index=all_index)
                    img1 = img1[450:, 400:1000, :]
                    img1 = cv2.resize(img1, None, fx=2, fy=2, interpolation=cv2.INTER_LINEAR)
                    video_v1.append(img1.astype(np.uint8))


                print("save video")
                fourcc = cv2.VideoWriter_fourcc(*'mp4v')
                videoWriter = cv2.VideoWriter(join(data_dir, f"{person}_contact.mp4"), fourcc, 10, (1200, 900))
                for frame in video_v1:
                    videoWriter.write(frame)
                videoWriter.release()
                print(f"done visualizing contact areas to {join(data_dir, f'{person}_contact.mp4')}")

        # Move the transferred contact info onto the target device as tensors.
        for person in new_contact_info:
            new_contact_info[person]["dist"] = torch.tensor(contact_points[person]["dist"], dtype=torch.float32).to(device)
            new_contact_info[person]["closest_point"] = torch.tensor(new_contact_info[person]["closest_point"], dtype=torch.int64).to(device)
            new_contact_info[person]["contact"] = torch.tensor(contact_points[person]["contact"], dtype=torch.bool).to(device)

        
        print("finish transfering contact areas !!!")
    ######################################################################
    
    # Scorer: pretrained RewardModel (discriminator) used to rank results.
    # NOTE(review): this overwrites the `cfg` function argument with a
    # hard-coded config — confirm intended.
    cfg = load_config("/home/liuyun/codebases/HHO_VAE/expriments/test6/config.yaml")
    model = RewardModel(cfg).to(device)
    state_dict = torch.load('/home/liuyun/codebases/HHO_VAE/saved_models/zcw_20240218_2/model_600.pth')
    # Strip the DataParallel 'module.' prefix from checkpoint keys if present.
    new_state_dict = {}
    for key, value in state_dict.items():
        if key.startswith('module.'):
            new_key = key[len('module.'):]
        else:
            new_key = key
        new_state_dict[new_key] = value
    model.load_state_dict(new_state_dict)
    model.eval()
    
    # Outer loop: sample contact areas around each finger's retargeted point.
    # new_contact_info holds, for each person and hand, the expected distance,
    # expected closest point and expected contact flag of the 5 fingertips
    # w.r.t. the new object (new_contact_info[person]["closest_point"].shape =
    # (N_frame, 10), each entry an index on the new object).
    
    sampled_target_contact_info = []  # one candidate contact_info per outer-loop iteration
    
    # Quasi-static assumption: pick one contact configuration and hold it fixed
    # across all frames.
    new_contact_info_from_oakink = new_contact_info.copy()
    contact_point_candidates = {  # per person / per hand: all OakInk closest points contacted over the sequence
        "person1": {"left_hand": [], "right_hand": [],},
        "person2": {"left_hand": [], "right_hand": [],},
    }

    # NOTE(review): contact_start_frame records only the first left-hand
    # contact of whichever person is iterated first; confirm this is intended.
    contact_start_frame = 0
    start_flag = True
    for person in new_contact_info_from_oakink:
        left_hand_contact_flag = new_contact_info_from_oakink[person]["contact"][:, :5].sum(axis=-1) > 0  # shape = (N_frame,); the hand counts as in contact if any fingertip touches the object
        right_hand_contact_flag = new_contact_info_from_oakink[person]["contact"][:, 5:].sum(axis=-1) > 0  # shape = (N_frame,); the hand counts as in contact if any fingertip touches the object
        # NOTE(review): the flags have (end_frame - start_frame) entries but are
        # indexed with absolute frame ids — only correct when start_frame == 0.
        for idx in range(start_frame, end_frame):
            if left_hand_contact_flag[idx]:
                if start_flag:
                    contact_start_frame = idx
                    start_flag = False
                contact_point_candidates[person]["left_hand"].append(new_contact_info_from_oakink[person]["closest_point"][idx, :5])
            if right_hand_contact_flag[idx]:
                contact_point_candidates[person]["right_hand"].append(new_contact_info_from_oakink[person]["closest_point"][idx, 5:])
    for person in contact_point_candidates:
        for hand_name in contact_point_candidates[person]:
            print("[contact_point_candidates] {}, {}: candidate number = {}".format(person, hand_name, len(contact_point_candidates[person][hand_name])))
    
    # Baseline quasi-static contact: one randomly chosen per-hand candidate,
    # broadcast to every frame.
    quasi_static_contact = deepcopy(new_contact_info_from_oakink)
    for person in contact_point_candidates:
        for hand_name in contact_point_candidates[person]:
            N_candidate = len(contact_point_candidates[person][hand_name])
            if N_candidate == 0:
                continue
            k = np.random.randint(0, N_candidate)
            candidate = contact_point_candidates[person][hand_name][k]
            if hand_name == "left_hand":
                quasi_static_contact[person]["closest_point"][:, :5] = candidate
            else:
                quasi_static_contact[person]["closest_point"][:, 5:] = candidate
    
    
    # Baseline run with the unperturbed quasi-static contact, saved for reference.
    result_params = HOI_retargeting_given_target_contact(multiperson_SMPLX_params, retarget_obj_vert_seq, quasi_static_contact, origin_contact_info, start_frame, end_frame, use_pca, num_pca_params, use_new_obj, cfg, epoch=1500, device=device)
    # NOTE(review): np.save fails if join(data_dir, "HOI_retargeting") does not
    # already exist — no makedirs here; confirm the directory is pre-created.
    temp_path = join(data_dir, "HOI_retargeting",f"{new_obj_name}noout_nodis.npy")
    np.save(temp_path, result_params)
    HOI_visualization(temp_path, object_pose_save_path, object_mesh_save_path, obj_dataset_dir, data_dir, data_dir, start_frame, end_frame, save_filename=f"{new_obj_name}noout_nodis.mp4", device=device)
    # (4) build each candidate contact for the outer loop (populate sampled_target_contact_info)
    K = 10  # number of outer-loop contact samples
    save_dir_for_outer_loop = join(dirname(human_pose_save_path), new_obj_name + "_results_for_each_contact_target")
    for i in range(K):
        contact_info_sample = deepcopy(new_contact_info_from_oakink)
        # apply the quasi-static assumption (one fixed candidate per hand)
        for person in contact_point_candidates:
            for hand_name in contact_point_candidates[person]:
                N_candidate = len(contact_point_candidates[person][hand_name])
                if N_candidate == 0:
                    continue
                k = np.random.randint(0, N_candidate)
                candidate = contact_point_candidates[person][hand_name][k]
                if hand_name == "left_hand":
                    contact_info_sample[person]["closest_point"][:, :5] = candidate
                else:
                    contact_info_sample[person]["closest_point"][:, 5:] = candidate
        # randomly sample a new contact near the current one
        assert contact_info_sample["person1"]["closest_point"].shape[-1] == 10  # 10
        for person in contact_info_sample:
            for hand_name in ["left_hand", "right_hand"]:
                # One shared noise vector for all 5 fingertip contacts of a hand.
                # NOTE(review): range is +/-0.40 m per axis although the original
                # note said +/-20 cm — confirm which is intended.
                random_uniform_noise = np.random.uniform(-0.40, 0.40, 3)
                if hand_name == "left_hand":
                    L, R = 0, 4
                else:
                    L, R = 5, 9
                for point_idx in range(L, R+1):
                    vertex_idx = contact_info_sample[person]["closest_point"][0, point_idx]
                    vertex_p = vert[vertex_idx]  # vert: new object's canonical-space vertices; vertex_p: a point, shape = (3,)
                        
                    p = vertex_p + random_uniform_noise  # shape = (3,)
                    new_idx = find_closest_point(p, vert) # snap the perturbed point back onto the object surface
                    print("[sample new target contact] {}, {}, {}, {}, contact point id = {}".format(i, person, hand_name, point_idx, new_idx))
                    contact_info_sample[person]["closest_point"][:, point_idx] = new_idx
        # save the sampled contact info (and the object mesh) for later reuse
        
        os.makedirs(save_dir_for_outer_loop, exist_ok=True)
        np.save(join(save_dir_for_outer_loop, str(i).zfill(4) + "_contact_info.npy"), contact_info_sample)
        save_mesh(obj_mesh, join(save_dir_for_outer_loop, "new_obj_mesh.obj"))
        
        sampled_target_contact_info.append(contact_info_sample)

    
    # (5) evaluate every candidate and keep the best-scoring one.
    optimal_result = {
        "score": -10000.0,  # sentinel: any real score should beat this
        "contact": None
    }
    
    init_multiperson_params = None
    # subframe_start = (end_frame - contact_start_frame) // 2
    # subframe_end = min(end_frame, subframe_start + 40)
    subframe_start = start_frame
    subframe_end = end_frame
    for contact_idx, new_contact_info in enumerate(sampled_target_contact_info):
        print("processing contact sample {} ...".format(str(contact_idx).zfill(4)))

        target_contact = {"person1": [], "person2": []}
        for person in new_contact_info:
            target_contact[person] = np.unique(new_contact_info[person]["closest_point"].detach().cpu().numpy())
        
        vis_pcd(target_contact, target_pcd, join(save_dir_for_outer_loop, str(contact_idx).zfill(4) + "_contact.ply"))
        print("contact visualization")

        # inner optimization: human poses for all frames under this contact constraint
        
        # subframe_start = start_frame
        # subframe_end = end_frame
        result_smplx_params = HOI_retargeting_given_target_contact(multiperson_SMPLX_params, retarget_obj_vert_seq, new_contact_info, origin_contact_info, subframe_start, subframe_end, use_pca, num_pca_params, use_new_obj, cfg, epoch=1500, device=device)

        # save and visualize
        save_dir_for_outer_loop = join(dirname(human_pose_save_path), new_obj_name + "_results_for_each_contact_target")
        os.makedirs(save_dir_for_outer_loop, exist_ok=True)
        human_pose_save_p = join(save_dir_for_outer_loop, str(contact_idx).zfill(4) + ".npy")
        np.save(human_pose_save_p, result_smplx_params)

        # use a pretrained model to score the result
        # Gather each person's 63-dim body pose per frame for the discriminator.
        pose_result = {"person1": torch.zeros((subframe_end - subframe_start, 63)).to(device), "person2": torch.zeros((subframe_end - subframe_start, 63)).to(device)}
        for i in range(0, subframe_end - subframe_start):
            for person in pose_result:
                pose_result[person][i, :] = result_smplx_params[i][person]["body_pose"].reshape(-1).to(device)
        discriminator_score = {"person1": None, "person2": None}
        for person in discriminator_score:
            discriminator_score[person] = model(pose_result[person]).detach().cpu().numpy()
        # Overall score = the worse of the two persons' mean scores.
        overall_score = min(discriminator_score["person1"].mean(), discriminator_score["person2"].mean())
        
        HOI_visualization(human_pose_save_p, object_pose_save_path, object_mesh_save_path, obj_dataset_dir, data_dir, save_dir_for_outer_loop, subframe_start, subframe_end, save_filename=new_obj_name + str(contact_idx).zfill(4)+"_vis" + f"_{str(overall_score)}" + ".mp4", device=device)
        
        # select the best version
        print("discriminator_score", overall_score)
        if optimal_result["score"] < overall_score:
            optimal_result["score"] = overall_score
            optimal_result["contact"] = join(save_dir_for_outer_loop, str(contact_idx).zfill(4) + "_contact_info.npy")
    
    #############################################################

    # Final full-range optimization using the best-scoring contact target.
    optimal_contact = np.load(optimal_result["contact"], allow_pickle=True).item()
    optimal_params = HOI_retargeting_given_target_contact(multiperson_SMPLX_params, retarget_obj_vert_seq, optimal_contact, origin_contact_info, start_frame, end_frame, use_pca, num_pca_params, use_new_obj, cfg, epoch=1500, device=device)

    return optimal_params


def parse_args():
    """Build and parse the command-line arguments for the HOI retargeting script."""
    p = argparse.ArgumentParser()
    ############# data ########################
    # p.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230807_2/002")  # carrying a table
    # p.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230807_2/004")  # rotating a table
    p.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230807_1/017")  # carrying a stick
    # p.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230805_1/002")  # rotating a chair
    # Frame window [start_frame, end_frame) of the sequence to process.
    p.add_argument('--start_frame', '-s', type=int, default=0)
    p.add_argument('--end_frame', '-e', type=int, default=300)
    p.add_argument('--save_filename', type=str, default="compare_result.mp4")
    p.add_argument('--device', type=str, default="cuda:0")
    # Per-axis scaling applied during object retargeting.
    p.add_argument('--scale_x', type=float, default=1.0)
    p.add_argument('--scale_y', type=float, default=1.0)
    p.add_argument('--scale_z', type=float, default=1.0)
    p.add_argument('--use_new_obj', action="store_true")  # if set, retarget onto a new object
    p.add_argument('--new_obj_id', type=str, default="")
    p.add_argument('--force_retarget_obj', action="store_true")  # if set, force re-running obj_retargeting
    p.add_argument('--objpose_filename', type=str, default="obj_poses.npy")
    p.add_argument('--objmesh_filename', type=str, default="new_obj.obj")
    p.add_argument('--npy_save_filename', type=str, default="HOI_retargeting_result.npy")
    return p.parse_args()


if __name__ == '__main__':

    #########################################################################################
    # Fixed location of the object dataset; per-run paths come from the CLI args.
    obj_dataset_dir = "/data3/datasets/HHO_object_dataset_final/"
    # obj_data_path = "test/chair10_m.obj"
    cfg = {
        "retarget_person1": True,
        "retarget_person2": True,
    }

    args = parse_args()
    
    data_dir = args.data_dir
    device = args.device
    start_frame, end_frame = args.start_frame, args.end_frame

    # Clamp end_frame to the number of available aligned object poses.
    # Load the file once instead of twice (the original loaded it again just for len()).
    aligned_objposes = np.load(join(data_dir, 'aligned_objposes.npy'), allow_pickle=True)
    if len(aligned_objposes) < end_frame:
        end_frame = len(aligned_objposes)
        print("Warning: end_frame is larger than the length of object_result, set end_frame to {}".format(str(end_frame)))

    print(data_dir, "[", start_frame, end_frame, ")")
    
    obj_retargeting_save_dir = join(data_dir, "obj_retargeting")
    HOI_retargeting_save_dir = join(data_dir, "HOI_retargeting")

    # Output paths for the retargeted human poses, object poses and object mesh.
    result_save_dir = join(HOI_retargeting_save_dir, args.npy_save_filename)

    objpose_save_file = join(obj_retargeting_save_dir, args.objpose_filename)
    objmesh_save_file = join(obj_retargeting_save_dir, args.objmesh_filename)
    
    # Resolve the source object's name/mesh path, and its (optional) sorted contact pool.
    obj_name, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
    print(obj_name, obj_data_path)
    contact_pool_path = join(os.path.abspath(join(obj_data_path, "..")), "sorted_pool.npy")
    if not isfile(contact_pool_path):
        contact_pool_path = None
    assert isfile(obj_data_path)
    
    if args.use_new_obj:
        new_obj_name = args.new_obj_id
        print(obj_name, " retarget to ", new_obj_name)
    
    # Object retargeting, case 1: same object, only rescaled by a diagonal 4x4 matrix.
    if args.force_retarget_obj and not args.use_new_obj:
        q_mat = torch.from_numpy(np.array([[args.scale_x, 0, 0, 0], [0, args.scale_y, 0, 0], [0, 0, args.scale_z, 0], [0, 0, 0, 1]]))
        print("q_mat", q_mat)
        os.makedirs(obj_retargeting_save_dir, exist_ok=True)
        obj_retargeting_result = obj_retargeting(data_dir, obj_data_path, q_mat, start_frame=start_frame, end_frame=end_frame, device=device)
        # NOTE(review): argument order here is (path, mesh) but the new-object branch
        # below calls save_mesh(mesh, path) — one of the two is almost certainly wrong.
        # The file imports save_mesh twice (utils.visualization then utils.mesh, which
        # shadows the first); verify the actual signature and unify both call sites.
        save_mesh(objmesh_save_file, obj_retargeting_result["obj_mesh"])
        np.save(objpose_save_file, obj_retargeting_result["obj_poses"])
    
    # Object retargeting, case 2: transfer the motion onto a different object mesh.
    if args.force_retarget_obj and args.use_new_obj:
        os.makedirs(obj_retargeting_save_dir, exist_ok=True)
        target_data_path = get_obj_path(args.new_obj_id)
        # Hard-coded anisotropic scaling for the new object — presumably tuned by hand;
        # TODO confirm whether this should come from the --scale_* CLI args instead.
        q_mat = np.array([[1.1, 0, 0, 0], [0, 1.6, 0, 0], [0, 0, 1.5, 0], [0, 0, 0, 1]])
        q_mat = torch.from_numpy(q_mat)

        print("target_data_path", target_data_path)
        obj_retargeting_result = obj_retargeting_new_obj(data_dir, obj_data_path, target_data_path, start_frame=start_frame, end_frame=end_frame, device=device, q_mat=q_mat)
        # NOTE(review): argument order (mesh, path) is swapped relative to the branch
        # above — see the note there; left unchanged pending the real signature.
        save_mesh(obj_retargeting_result["obj_mesh"], objmesh_save_file)
        np.save(objpose_save_file, obj_retargeting_result["obj_poses"])
    
    # obj_visualization(data_dir, obj_dataset_dir, objmesh_save_file, objpose_save_file, data_dir, start_frame, end_frame, device="cuda:0")

    # print(objmesh_save_file)
    origin_mesh = trimesh.load_mesh(obj_data_path)
    obj_mesh = trimesh.load_mesh(objmesh_save_file)
    obj_poses = np.load(objpose_save_file, allow_pickle=True)

    # HOI retargeting: optimize the two persons' SMPL-X poses against the
    # (possibly new) object; the result is saved both by us and by the callee.
    if args.use_new_obj:
        HOI_retargeting_result = HOI_retargeting(data_dir, HOI_retargeting_save_dir, origin_mesh, obj_name, obj_mesh, obj_poses, \
            start_frame=start_frame, end_frame=end_frame, cfg=cfg, new_obj_name=new_obj_name, device=device, use_new_obj=True, contact_vis=False, \
            human_pose_save_path=result_save_dir, object_pose_save_path=objpose_save_file, object_mesh_save_path=objmesh_save_file, obj_dataset_dir=obj_dataset_dir, contact_pool_path=contact_pool_path)
    else:
        HOI_retargeting_result = HOI_retargeting(data_dir, HOI_retargeting_save_dir, origin_mesh, obj_name, obj_mesh, obj_poses, \
            start_frame=start_frame, end_frame=end_frame, cfg=cfg, device=device, use_new_obj=False, \
            human_pose_save_path=result_save_dir, object_pose_save_path=objpose_save_file, object_mesh_save_path=objmesh_save_file, obj_dataset_dir=obj_dataset_dir, contact_pool_path=contact_pool_path)
    np.save(result_save_dir, HOI_retargeting_result)

    # visualization
    # HOI_retargeting_result = np.load(join(HOI_retargeting_save_dir, "HOI_retargeting_result.npy"), allow_pickle=True)
    HOI_visualization(result_save_dir, objpose_save_file, objmesh_save_file, obj_dataset_dir, data_dir, data_dir, start_frame, end_frame, save_filename=args.save_filename, device=args.device)
