import os
from os.path import join, isfile
import sys
sys.path.append("/home/liuyun/HHO-dataset/data_processing/Tink")
sys.path.append("/home/liuyun/HHO-dataset/data_processing/")
import argparse
import numpy as np
import pickle
import torch
from torch import nn
import pytorch3d
import pytorch3d.io as IO
import trimesh
from smplx import smplx
import cv2
import imageio
from utils.txt2intrinsic import txt2intrinsic
from smplx.smplx.utils import Struct, to_tensor, to_np
from utils.pyt3d_wrapper import Pyt3DWrapper
from utils.avi2depth import avi2depth
from utils.time_align import time_align
from utils.process_timestamps import txt_to_paried_frameids, paired_frameids_to_txt
from utils.contact import compute_contact
from utils.VTS_object import get_obj_info, get_obj_name_correspondance
from utils.visualization import save_mesh
from utils.load_smplx_params import load_multiperson_smplx_params
from utils.object_retargeting import obj_retargeting, obj_retargeting_new_obj
from utils.contact import compute_contact_and_closest_point
from smplx.smplx.lbs import batch_rodrigues
from transforms3d.axangles import mat2axangle
import open3d as o3d
from optimization.utils import local_pose_to_global_orientation
from optimization.bvh2smplx import Simple_SMPLX, create_SMPLX_model
from utils.retargeting_visualization import HOI_visualization, obj_visualization
import matplotlib.pyplot as plt
import time
from utils.simplify_mesh import simplify_mesh
from utils.get_joints import get_joints
from Tink.tink.transform_contact_info import tranfer_contact_to_new_obj, get_obj_path
from Tink.tink.cal_contact_info import to_pointcloud


# Fingertip vertex indices on the SMPL-X body mesh, left then right hand
# (thumb, index, middle, ring, pinky). Used to pick per-fingertip contact
# entries out of per-vertex contact info (see naive_joint_contact_loss).
# NOTE(review): assumed to index the standard SMPL-X template mesh loaded
# below from SMPLX_NEUTRAL.npz — TODO confirm against that mesh's vertex count.
HAND_VERT_IDS = {
    'lthumb':		5361,
    'lindex':		4933,
    'lmiddle':		5058,
    'lring':		5169,
    'lpinky':		5286,
    'rthumb':		8079,
    'rindex':		7669,
    'rmiddle':		7794,
    'rring':		7905,
    'rpinky':		8022,
}

# Load the raw SMPL-X neutral model archive once at import time. The pieces
# extracted below are passed to `get_joints` for joint regression instead of
# running the full smplx layer.
model_path = "/share/human_model/models/smplx/SMPLX_NEUTRAL.npz"
model_data = np.load(model_path, allow_pickle=True)
data_struct = Struct(**model_data)
shapedirs = data_struct.shapedirs  # shape blend-shape basis
v_template = data_struct.v_template  # template mesh vertices
J_regressor = data_struct.J_regressor  # regresses joints from vertices
parents = data_struct.kintree_table[0]  # kinematic-tree parent index per joint
# Keep only the first 12 PCA components per hand — must match the
# num_pca_params=12 used when creating the SMPL-X model further below.
left_hand_components = data_struct.hands_componentsl[:12]
right_hand_components = data_struct.hands_componentsr[:12]

def create_empty_SMPLX_params(N, N_betas=10, N_expression=10, N_hand_pca=12, device="cuda:0"):
    """
    Allocate zero-initialized SMPL-X parameter tensors for a batch of N frames.

    [input]
    * N: number of frames (batch size)
    * N_betas / N_expression / N_hand_pca: parameter dimensionalities
    * device: torch device string the tensors live on

    [return]
    * dict mapping SMPL-X parameter names to float32 zero tensors on `device`
    """
    def _zeros(*shape):
        # Allocate directly on the target device with an explicit dtype
        # (the original allocated on CPU and copied, and left the dtype of
        # some tensors implicit).
        return torch.zeros(shape, dtype=torch.float32, device=device)

    return {
        "betas": _zeros(N, N_betas),
        "expression": _zeros(N, N_expression),
        "global_orient": _zeros(N, 3),
        "transl": _zeros(N, 3),
        "body_pose": _zeros(N, 21, 3),
        "left_hand_pose": _zeros(N, N_hand_pca),
        "right_hand_pose": _zeros(N, N_hand_pca),
    }


class SMPLX_HH(nn.Module):
    """
    Two-person SMPL-X parameter container for HOI-retargeting optimization.

    All per-person SMPL-X parameters are registered as nn.Parameters; only
    global_orient, body_pose and transl are optimized (requires_grad=True),
    while betas, hand poses, expression and face poses stay frozen.
    Attribute names follow the pattern ``smplx_<param>_<person>`` (e.g.
    ``smplx_betas_person1``), identical to the original hand-written fields.
    """

    # (parameter key, optimized?). Order matters: it reproduces the original
    # per-person parameter registration order exactly.
    _PERSON_PARAMS = (
        ("betas", False),
        ("global_orient", True),
        ("body_pose", True),
        ("left_hand_pose", False),
        ("right_hand_pose", False),
        ("transl", True),
    )

    def __init__(self, smplx_model, init_smplx_params, cfg):
        """
        * smplx_model: SMPL-X model (kept as attribute; forward uses `get_joints`)
        * init_smplx_params: {"person1": {...}, "person2": {...}} of tensors
        * cfg: currently unused (kept for interface compatibility)
        """
        super(SMPLX_HH, self).__init__()
        self.smplx_model = smplx_model
        # Expression is shared by both persons and initialized from person1 only.
        self.smplx_expression = nn.Parameter(init_smplx_params["person1"]["expression"].clone().detach(), requires_grad=False)
        for person in ("person1", "person2"):
            for key, optimized in self._PERSON_PARAMS:
                value = init_smplx_params[person][key].clone().detach()
                setattr(self, "smplx_{}_{}".format(key, person), nn.Parameter(value, requires_grad=optimized))
            # Frozen zero face poses, one (N, 3) axis-angle per frame.
            n_frames = init_smplx_params[person]["betas"].shape[0]
            for key in ("jaw_pose", "leye_pose", "reye_pose"):
                setattr(self, "smplx_{}_{}".format(key, person),
                        nn.Parameter(torch.zeros([n_frames, 3], dtype=torch.float32), requires_grad=False))

    def _person_param(self, person, key):
        # Fetch the registered parameter for one person by logical name.
        return getattr(self, "smplx_{}_{}".format(key, person))

    def forward(self):
        """
        Regress joints for both persons from the current parameters.

        [return]
        * {"person1": {...}, "person2": {...}}, each containing "joints" plus
          the raw parameter tensors (betas, expression, global_orient, transl,
          body_pose, left_hand_pose, right_hand_pose).
        """
        results = {}
        for person in ("person1", "person2"):
            p = {key: self._person_param(person, key) for key, _ in self._PERSON_PARAMS}
            joints = get_joints(p["global_orient"], p["betas"], p["body_pose"], p["transl"],
                                p["left_hand_pose"], p["right_hand_pose"],
                                left_hand_components, right_hand_components,
                                shapedirs, v_template, J_regressor, parents)
            results[person] = dict(p, joints=joints, expression=self.smplx_expression)
        return results


def compute_contact_info(human_params, smplx_model, idx, obj_vertices, threshould=0.05, device="cuda:0"):
    """
    Run SMPL-X for one frame and compute contact between the resulting human
    mesh vertices and the given object vertices.

    [input]
    * human_params: SMPLX params (batched per frame)
    * idx: frame idx
    * obj_vertices: torch.float32, shape = (M, 3)

    human SMPLX mesh: shape = (N, 3)

    [return]
    * contact: torch.bool, shape = (N)
    * dist: torch.float32, shape = (N)
    * closest_point: torch.int64, shape = (N)
    """
    param_keys = ("betas", "expression", "global_orient", "transl",
                  "body_pose", "left_hand_pose", "right_hand_pose")
    # Slice out frame `idx` while keeping the batch dimension, detached from
    # any autograd graph and moved to the target device.
    frame_params = {key: human_params[key][idx:idx + 1].detach().to(device) for key in param_keys}
    result_model = smplx_model(return_verts=True, **frame_params)
    human_vertices = result_model.vertices[0]  # (N, 3) human vertices
    contact, dist, closest_point = compute_contact_and_closest_point(human_vertices, obj_vertices, threshould=threshould)
    return contact, dist, closest_point


def naive_contact_loss(person_origin_contact_info, ids, new_human_vertices, new_obj_vertices):
    """
    Per-vertex contact loss: for every human vertex flagged as in contact, the
    distance to its originally-closest object point should match the original
    contact distance.

    [input]
    * person_origin_contact_info: dict with "contact"/"dist"/"closest_point"
      tensors indexed (frame, N_human_vertex)
    * ids: frame idx list (length B)
    * new_human_vertices: (B, N_human_vertex, 3)
    * new_obj_vertices: (B, M, 3)

    [return]
    * scalar loss tensor
    """
    # get contact_info from original data
    contact_flag = person_origin_contact_info["contact"][ids]  # (B, N_human_vertex)
    dist = person_origin_contact_info["dist"][ids]  # (B, N_human_vertex)
    closest_point = person_origin_contact_info["closest_point"][ids]  # (B, N_human_vertex)
    B, N_human_vertex = closest_point.shape

    # Row index for gathering each vertex's closest object point.
    # Bug fix: the original called .to(device) on a name `device` that is never
    # defined in this scope; derive the device from the index tensor instead.
    rows = torch.arange(0, B, device=closest_point.device).unsqueeze(1).repeat(1, N_human_vertex).reshape(-1)  # (B * N_human_vertex)
    real_diff = new_human_vertices - new_obj_vertices[rows, closest_point.reshape(-1)].reshape(B, N_human_vertex, 3)  # (B, N_human_vertex, 3)
    # Only vertices with contact_flag set contribute to the loss.
    contact_loss = torch.sum(contact_flag * torch.abs(torch.sum(real_diff**2, dim=-1)**(0.5) - dist))
    return contact_loss

def naive_joint_contact_loss(person_origin_contact_info, ids, new_human_joints, new_obj_vertices, origin_human_joints):
    """
    Fingertip-only contact loss: for each of the 10 fingertip vertices in
    HAND_VERT_IDS, the distance from the retargeted fingertip joint to its
    originally-closest object point should match the originally observed
    contact distance.

    [input]
    * person_origin_contact_info: dict with "dist"/"closest_point" tensors,
      indexed (frame, N_human_vertex)
    * ids: frame idx list (length B)
    * new_human_joints: (B, N_joints, 3); fingertip joints assumed to sit at
      slots 61:71 — NOTE(review): TODO confirm joint layout
    * new_obj_vertices: (B, M, 3) retargeted object vertices
    * origin_human_joints: unused; kept for interface compatibility. The
      original vector-matching term that consumed it (using a mismatched
      66:76 slice) was dead code and has been removed.

    [return]
    * scalar loss tensor
    """
    dist = person_origin_contact_info["dist"][ids]
    closest_point = person_origin_contact_info["closest_point"][ids]
    B = closest_point.shape[0]

    hand_ids = list(HAND_VERT_IDS.values())
    hand_vertex_closest = closest_point[:, hand_ids]  # B x 10
    hand_vertex_dist = dist[:, hand_ids]  # B x 10
    # Gather each fingertip's originally-closest object point; keep the row
    # index on the same device as the data being indexed.
    batch_rows = torch.arange(B, device=closest_point.device).unsqueeze(1)
    new_obj_contact_vert = new_obj_vertices[batch_rows, hand_vertex_closest]  # B x 10 x 3
    hand_vertex_diff_vec = new_human_joints[:, 61:71, :] - new_obj_contact_vert  # B x 10 x 3
    hand_vertex_contact_loss = torch.sum((torch.sum(hand_vertex_diff_vec**2, dim=-1)**(0.5) - hand_vertex_dist)**2)
    return hand_vertex_contact_loss


# new_contact_area["person1"] (B, contact_index)
def set_contact_area(person_origin_contact_info, ids, new_contact_area):
    """
    Overwrite the stored closest-point indices for the given frames, in place.

    [input]
    * person_origin_contact_info: dict with a "closest_point" tensor indexed
      (frame, N_human_vertex)
    * ids: frame idx list
    * new_contact_area: replacement closest-point indices, indexed like the
      stored "closest_point" tensor.
      NOTE(review): `new_contact_area` is itself indexed by `ids`, i.e. it is
      assumed to be aligned with the full frame sequence rather than already
      sliced — confirm against the caller.

    [return]
    * None — mutates person_origin_contact_info["closest_point"] in place.
      (The original docstring claimed contact/dist/closest_point return
      values; nothing was ever returned. The unused reads of "contact" and
      "dist" have been removed.)
    """
    person_origin_contact_info["closest_point"][ids] = new_contact_area[ids]
    

def HOI_retargeting(data_dir, save_dir, origin_mesh, obj_mesh, obj_poses, start_frame, end_frame, cfg, device="cuda:0", use_new_obj=False):
    """
    目前这一环节只优化human pose
    """
    
    start_time = time.perf_counter()
    os.makedirs(save_dir, exist_ok=True)
    use_pca, num_pca_params = True, 12

    # if len(np.load(join(data_dir, 'aligned_objposes.npy'), allow_pickle=True)) < end_frame:
    #     end_frame = len(np.load(join(data_dir, 'aligned_objposes.npy'), allow_pickle=True))
    #     print("Warning: end_frame is larger than the length of object_result, set end_frame to {}".format(str(end_frame)))
    
    # (1) load gt human poses and simplify obj_mesh
    try:
        multiperson_SMPLX_params = load_multiperson_smplx_params(join(data_dir, "SMPLX_fitting"), start_frame=start_frame, end_frame=end_frame, device=device)
    except Exception as e:
        raise e

    print(multiperson_SMPLX_params["person1"]["body_pose"].shape)
    print(multiperson_SMPLX_params.keys(), multiperson_SMPLX_params["person2"].keys(), multiperson_SMPLX_params["person2"]["body_pose"].shape, multiperson_SMPLX_params["person1"]["joints"].shape)
    if origin_mesh.vertices.shape[0] > 10000:
        origin_mesh, obj_mesh = simplify_mesh(origin_mesh, obj_mesh)
        print("downsampled object mesh: vertices shape =", origin_mesh.vertices.shape)
    count_time = time.perf_counter()
    print("time for loading data and simplifying meshes: {}s".format(str(count_time - start_time)))
    # (2) compute per-frame HOH meshes and compute contact
    vert, face = obj_mesh.vertices, obj_mesh.faces
    gt_obj_rot_vec = []
    gt_obj_trans_vec = []
    for i in range(start_frame, end_frame):
        gt_obj_rot_vec.append(obj_poses[i]["rotation"].cpu().numpy())
        gt_obj_trans_vec.append(obj_poses[i]["translation"].cpu().numpy())
    gt_obj_rot_vec = torch.tensor(gt_obj_rot_vec, dtype=torch.float64).to(device).squeeze(1)
    print(gt_obj_rot_vec.shape)
    gt_obj_rot_mat = batch_rodrigues(gt_obj_rot_vec).cpu().numpy()
    gt_obj_trans_vec = np.array(gt_obj_trans_vec)
    retarget_obj_vert_seq = (np.array(gt_obj_rot_mat @ vert.T)).transpose(0, 2, 1) + gt_obj_trans_vec
    retarget_obj_vert_seq = torch.from_numpy(retarget_obj_vert_seq).to(device)  # (N, 3)

    origin_vert, origin_face = origin_mesh.vertices, origin_mesh.faces
    object_dir = join(data_dir, 'aligned_objposes.npy')
    origin_pose = np.load(object_dir, allow_pickle=True)[start_frame:end_frame]
    origin_vert_seq = (np.array(origin_pose[:, :3, :3] @ origin_vert.T)).transpose(0, 2, 1) + np.expand_dims(origin_pose[:, :3, 3], axis=1)
    origin_vert_seq = torch.from_numpy(origin_vert_seq).to(device)  # (N, 3)
    
    ######################################################################
    # preprocess contact areas
    print("start preparing contact areas ...")
    origin_contact_info = {
        "person1": {"contact": [], "dist": [], "closest_point": []},
        "person2": {"contact": [], "dist": [], "closest_point": []},
    }
    contact_threshould=0.05
    # contact_threshould = 0.01
    smplx_model = create_SMPLX_model(use_pca=use_pca, num_pca_comps=num_pca_params, batch_size=1, device=device)
    
    for idx in range(0, end_frame - start_frame):
        # person1 to original obj
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person1"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshould, device=device)
        origin_contact_info["person1"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person1"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person1"]["closest_point"].append(closest_point.detach().cpu().numpy())
        # person2 to original obj
        contact, dist, closest_point = compute_contact_info(multiperson_SMPLX_params["person2"], smplx_model, idx, origin_vert_seq[idx], threshould=contact_threshould, device=device)
        origin_contact_info["person2"]["contact"].append(contact.detach().cpu().numpy())
        origin_contact_info["person2"]["dist"].append(dist.detach().cpu().numpy())
        origin_contact_info["person2"]["closest_point"].append(closest_point.detach().cpu().numpy())
    
    for person in origin_contact_info:
        origin_contact_info[person]["contact"] = torch.tensor(origin_contact_info[person]["contact"], dtype=torch.bool).to(device)
        origin_contact_info[person]["dist"] = torch.tensor(origin_contact_info[person]["dist"], dtype=torch.float32).to(device)
        origin_contact_info[person]["closest_point"] = torch.tensor(origin_contact_info[person]["closest_point"], dtype=torch.int64).to(device)
    print("finish preparing contact areas !!!")


    print(origin_contact_info["person1"]["contact"].shape, origin_contact_info["person1"]["dist"].shape, origin_contact_info["person1"]["closest_point"].shape)
    
    ######################################################################
        
    
    # (3) TODO: human-only retargeting
    origin_smplx_params = {
        "person1": {
            "betas": multiperson_SMPLX_params["person1"]["betas"].detach().to(device),
            "expression": multiperson_SMPLX_params["person1"]["expression"].detach().to(device),
            "global_orient": multiperson_SMPLX_params["person1"]["global_orient"].detach().to(device),
            "transl": multiperson_SMPLX_params["person1"]["transl"].detach().to(device),
            "body_pose": multiperson_SMPLX_params["person1"]["body_pose"].detach().to(device),
            "left_hand_pose": multiperson_SMPLX_params["person1"]["left_hand_pose"].detach().to(device),
            "right_hand_pose": multiperson_SMPLX_params["person1"]["right_hand_pose"].detach().to(device),
            "joints": multiperson_SMPLX_params["person1"]["joints"].detach().to(device),
        },
        "person2": {
            "betas": multiperson_SMPLX_params["person2"]["betas"].detach().to(device),
            "expression": multiperson_SMPLX_params["person2"]["expression"].detach().to(device),
            "global_orient": multiperson_SMPLX_params["person2"]["global_orient"].detach().to(device),
            "transl": multiperson_SMPLX_params["person2"]["transl"].detach().to(device),
            "body_pose": multiperson_SMPLX_params["person2"]["body_pose"].detach().to(device),
            "left_hand_pose": multiperson_SMPLX_params["person2"]["left_hand_pose"].detach().to(device),
            "right_hand_pose": multiperson_SMPLX_params["person2"]["right_hand_pose"].detach().to(device),
            "joints": multiperson_SMPLX_params["person2"]["joints"].detach().to(device),
        }
    }
    init_smplx_params = None  # 第一个batch用origin_smplx_params初始化
    
    # parameters in local/global reality loss
    # strong_relative_joints = np.int32([15, 16, 17, 18, 19, 20, 21])  # 在joints里的编号(0-127, 0是root)
    strong_relative_joints = np.int32([15, 16, 17, 20, 21])  # 在joints里的编号(0-127, 0是root)
    arm_joints = np.int32([18, 19])
    # weak_relative_joints = [i for i in range(0, 127) if i-1 not in strong_relative_joint]
    weak_relative_joints = np.int32([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14])  # 在joints里的编号(0-127, 0是root)
    feet_joints = np.int32([7, 8, 10, 11, 60, 61, 62, 63, 64, 65])
    # feet_joints = np.int32([7, 8, 10, 11, 55, 56, 57, 58, 59, 60])
    left_foot_joints = np.int32([7, 10, 60, 61, 62])
    right_foot_joints = np.int32([8, 11, 63, 64, 65])
    # left_foot_joints = np.int32([7, 10, 55, 56, 57])
    # right_foot_joints = np.int32([8, 11, 58, 59, 60])
    left_feet_ground_joints = np.int32([60, 61, 62])
    # left_feet_ground_joints = np.int32([57])
    right_feet_ground_joints = np.int32([63, 64, 65])
    # right_feet_ground_joints = np.int32([60])
    feet_ground_joints = np.int32([62, 65])
    head_joint = np.int32([15])
    hand_joints= np.int32([20, 21])
    lambda_regularizer, lambda_smoothness, lambda_reality, lambda_contact, lambda_v, lambda_feet, lambda_arm_loss = 1e-1, 1e-1, [1e-2, 1e-1, 3e-2], 1.0, 1.0, 1, 1e-3
    learning_rate = 1e-2

    print("strong_relative_joints =", strong_relative_joints)
    print("weak_relative_joints =", weak_relative_joints)
    print("feet_joints =", feet_joints)
    print("lambda_regularizer =", lambda_regularizer)
    print("lambda_smoothness =", lambda_smoothness)
    print("lambda_reality =", lambda_reality)
    print("lambda_contact =", lambda_contact)
    print("lambda_v =", lambda_v)
    print("lambda_feet =", lambda_feet)
    print("lambda_arm_loss =", lambda_arm_loss)
    print("learning_rate =", learning_rate)
    


    M = end_frame - start_frame  # batch size
    # M = 50
    # N_epoch = 20000
    N_epoch = 300
    result_smplx_params = []
    for i in range(start_frame, end_frame, M):
        ids = np.arange(i, min(i+M, end_frame))
        L = ids.shape[0]
        print("------------ optimizing frame {} - {} -----------------------".format(str(i), str(i+L-1)))
        # initialization
        sequence_init_smplx_params = {}
        if i == start_frame:  # the first batch
            for person in origin_smplx_params:
                sequence_init_smplx_params[person] = {
                    "betas": origin_smplx_params[person]["betas"][ids - start_frame, :].clone().detach(),
                    "expression": origin_smplx_params[person]["expression"][ids - start_frame, :].clone().detach(),
                    "global_orient": origin_smplx_params[person]["global_orient"][ids - start_frame, :].clone().detach(),
                    "transl": origin_smplx_params[person]["transl"][ids - start_frame, :].clone().detach(),
                    "body_pose": origin_smplx_params[person]["body_pose"][ids - start_frame, :].clone().detach(),
                    "left_hand_pose": origin_smplx_params[person]["left_hand_pose"][ids - start_frame, :].clone().detach(),
                    "right_hand_pose": origin_smplx_params[person]["right_hand_pose"][ids - start_frame, :].clone().detach(),
                }
        else:  # not the first batch
            for person in init_smplx_params:
                sequence_init_smplx_params[person] = {}
                for key in init_smplx_params[person]:
                    sequence_init_smplx_params[person][key] = init_smplx_params[person][key].expand((L,) + init_smplx_params[person][key].shape[1:]).clone()
        

        smplx_model = create_SMPLX_model(use_pca=use_pca, num_pca_comps=num_pca_params, batch_size=L, device=device)
        optim_model = SMPLX_HH(smplx_model, sequence_init_smplx_params, cfg)
        optim_model.to(device)
        optimizer = torch.optim.Adam(optim_model.parameters(), lr=learning_rate)
        # optimizer = torch.optim.LGFBS(optim_model.parameters(), lr = 1)
        # optimizer = torch.optim.AdamW(params=optim_model.parameters(), lr=learning_rate)
        optim_model.train()

        # TODO batch smoothness loss
        loss_rec = []

        epoch_start_time = time.perf_counter()

        p1_left_if_onground = torch.ge(origin_smplx_params["person1"]["joints"][ids-start_frame][:, left_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        p1_right_if_onground = torch.ge(origin_smplx_params["person1"]["joints"][ids-start_frame][:, right_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        p2_left_if_onground = torch.ge(origin_smplx_params["person2"]["joints"][ids-start_frame][:, left_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        p2_right_if_onground = torch.ge(origin_smplx_params["person2"]["joints"][ids-start_frame][:, right_feet_ground_joints, 1], 0.05).repeat(1, 5).unsqueeze(2).repeat(1, 1, 2)
        x_z = torch.tensor([0, 2]).to(device)

        for epoch in range(N_epoch):
            optimizer.zero_grad()
            results = optim_model()
            # vert_p1 = results["person1"]["vertices"]
            # vert_p2 = results["person2"]["vertices"]
            joints_p1, body_pose_p1, left_hand_pose_p1, right_hand_pose_p1, transl_p1, global_orient_p1, joints_p1 = results["person1"]["joints"], results["person1"]["body_pose"], results["person1"]["left_hand_pose"], results["person1"]["right_hand_pose"], results["person1"]["transl"], results["person1"]["global_orient"], results["person1"]["joints"]
            joints_p2, body_pose_p2, left_hand_pose_p2, right_hand_pose_p2, transl_p2, global_orient_p2, joints_p2 = results["person2"]["joints"], results["person2"]["body_pose"], results["person2"]["left_hand_pose"], results["person2"]["right_hand_pose"], results["person2"]["transl"], results["person2"]["global_orient"], results["person2"]["joints"]
            # body_pose_p1, left_hand_pose_p1, right_hand_pose_p1, transl_p1, global_orient_p1 = results["person1"]["body_pose"], results["person1"]["left_hand_pose"], results["person1"]["right_hand_pose"], results["person1"]["transl"], results["person1"]["global_orient"]
            # body_pose_p2, left_hand_pose_p2, right_hand_pose_p2, transl_p2, global_orient_p2 = results["person2"]["body_pose"], results["person2"]["left_hand_pose"], results["person2"]["right_hand_pose"], results["person2"]["transl"], results["person2"]["global_orient"]
            # joints_p1 = get_joints(global_orient_p1, results["person1"]["betas"], body_pose_p1, left_hand_pose_p1, right_hand_pose_p1, left_hand_components, right_hand_components, shapedirs, v_template, J_regressor, parents, device)
            # joints_p2 = get_joints(global_orient_p2, results["person2"]["betas"], body_pose_p2, left_hand_pose_p2, right_hand_pose_p2, left_hand_components, right_hand_components, shapedirs, v_template, J_regressor, parents, device)
            # print(joints_p1.shape, joints_p2.shape)
            
            # print(vert_p1.shape, "1111")
            regularizer_p1 = torch.sum(body_pose_p1**2) * 1e-3 + torch.sum(left_hand_pose_p1**2) * 1e-4 + torch.sum(right_hand_pose_p1**2) * 1e-4
            regularizer_p2 = torch.sum(body_pose_p2**2) * 1e-3 + torch.sum(left_hand_pose_p2**2) * 1e-4 + torch.sum(right_hand_pose_p2**2) * 1e-4
            # time_1 = time.perf_counter()
            # print("time for computing regularizer: {}s".format(str(time_1 - epoch_start_time)))

            smoothness_p1 = (torch.sum((2 * body_pose_p1[1:-1] - body_pose_p1[:-2] - body_pose_p1[2:])**2) + torch.sum((2 * left_hand_pose_p1[1:-1] - left_hand_pose_p1[:-2] - left_hand_pose_p1[2:])**2) + torch.sum((2 * right_hand_pose_p1[1:-1] - right_hand_pose_p1[:-2] - right_hand_pose_p1[2:])**2))  # acceleration
            smoothness_p2 = (torch.sum((2 * body_pose_p2[1:-1] - body_pose_p2[:-2] - body_pose_p2[2:])**2) + torch.sum((2 * left_hand_pose_p2[1:-1] - left_hand_pose_p2[:-2] - left_hand_pose_p2[2:])**2) + torch.sum((2 * right_hand_pose_p2[1:-1] - right_hand_pose_p2[:-2] - right_hand_pose_p2[2:])**2))  # acceleration
            
            # print("time for computing smoothness: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()

            if (i > start_frame) and (L >= 2):
                smoothness_to_init_p1 = (torch.sum((2 * body_pose_p1[0] - result_smplx_params[-1]["person1"]["body_pose"][0].detach() - body_pose_p1[1])**2) + torch.sum((2 * left_hand_pose_p1[0] - result_smplx_params[-1]["person1"]["left_hand_pose"][0].detach() - left_hand_pose_p1[1])**2) + torch.sum((2 * right_hand_pose_p1[0] - result_smplx_params[-1]["person1"]["right_hand_pose"][0].detach() - right_hand_pose_p1[1])**2))  # acceleration
                smoothness_to_init_p2 = (torch.sum((2 * body_pose_p2[0] - result_smplx_params[-1]["person2"]["body_pose"][0].detach() - body_pose_p2[1])**2) + torch.sum((2 * left_hand_pose_p2[0] - result_smplx_params[-1]["person2"]["left_hand_pose"][0].detach() - left_hand_pose_p2[1])**2) + torch.sum((2 * right_hand_pose_p2[0] - result_smplx_params[-1]["person2"]["right_hand_pose"][0].detach() - right_hand_pose_p2[1])**2))  # acceleration
            else:
                smoothness_to_init_p1 = 0.0
                smoothness_to_init_p2 = 0.0
            # print("time for computing smoothness_to_init: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()

            # reality_loss_p1 = torch.sum(torch.abs(body_pose_p1 - origin_smplx_params["person1"]["body_pose"][ids].detach())) + torch.sum(torch.abs(left_hand_pose_p1 - origin_smplx_params["person1"]["left_hand_pose"][ids].detach())) + torch.sum(torch.abs(right_hand_pose_p1 - origin_smplx_params["person1"]["right_hand_pose"][ids].detach())) + torch.sum(torch.abs(transl_p1 - origin_smplx_params["person1"]["transl"][ids].detach())) + torch.sum(torch.abs(global_orient_p1 - origin_smplx_params["person1"]["global_orient"][ids].detach()))
            # reality_loss_p2 = torch.sum(torch.abs(body_pose_p2 - origin_smplx_params["person2"]["body_pose"][ids].detach())) + torch.sum(torch.abs(left_hand_pose_p2 - origin_smplx_params["person2"]["left_hand_pose"][ids].detach())) + torch.sum(torch.abs(right_hand_pose_p2 - origin_smplx_params["person2"]["right_hand_pose"][ids].detach())) + torch.sum(torch.abs(transl_p2 - origin_smplx_params["person2"]["transl"][ids].detach())) + torch.sum(torch.abs(global_orient_p2 - origin_smplx_params["person2"]["global_orient"][ids].detach()))
            
            # 胳膊上的关节local旋转的loss  TODO: 应该换成L2
            # local_reality_loss_p1 = torch.sum(torch.abs(body_pose_p1[:, strong_relative_joint] - origin_smplx_params["person1"]["body_pose"][ids][:, strong_relative_joint].detach())) + torch.sum(torch.abs(left_hand_pose_p1 - origin_smplx_params["person1"]["left_hand_pose"][ids].detach())) + torch.sum(torch.abs(right_hand_pose_p1 - origin_smplx_params["person1"]["right_hand_pose"][ids].detach())) + torch.sum(torch.abs(transl_p1 - origin_smplx_params["person1"]["transl"][ids].detach())) + torch.sum(torch.abs(global_orient_p1 - origin_smplx_params["person1"]["global_orient"][ids].detach()))
            # local_reality_loss_p2 = torch.sum(torch.abs(body_pose_p2[:, strong_relative_joint] - origin_smplx_params["person2"]["body_pose"][ids][:, strong_relative_joint].detach())) + torch.sum(torch.abs(left_hand_pose_p2 - origin_smplx_params["person2"]["left_hand_pose"][ids].detach())) + torch.sum(torch.abs(right_hand_pose_p2 - origin_smplx_params["person2"]["right_hand_pose"][ids].detach())) + torch.sum(torch.abs(transl_p2 - origin_smplx_params["person2"]["transl"][ids].detach())) + torch.sum(torch.abs(global_orient_p2 - origin_smplx_params["person2"]["global_orient"][ids].detach()))
            # L1 loss
            # local_reality_loss_p1 = torch.sum(torch.abs(body_pose_p1[:, strong_relative_joints - 1] - origin_smplx_params["person1"]["body_pose"][ids-start_frame][:, strong_relative_joints - 1].detach())) + torch.sum(torch.abs(left_hand_pose_p1 - origin_smplx_params["person1"]["left_hand_pose"][ids-start_frame].detach())) + torch.sum(torch.abs(right_hand_pose_p1 - origin_smplx_params["person1"]["right_hand_pose"][ids-start_frame].detach()))
            # local_reality_loss_p2 = torch.sum(torch.abs(body_pose_p2[:, strong_relative_joints - 1] - origin_smplx_params["person2"]["body_pose"][ids-start_frame][:, strong_relative_joints - 1].detach())) + torch.sum(torch.abs(left_hand_pose_p2 - origin_smplx_params["person2"]["left_hand_pose"][ids-start_frame].detach())) + torch.sum(torch.abs(right_hand_pose_p2 - origin_smplx_params["person2"]["right_hand_pose"][ids-start_frame].detach()))
            # L2 loss
            local_reality_loss_p1 = torch.sum((body_pose_p1[:, strong_relative_joints - 1] - origin_smplx_params["person1"]["body_pose"][ids-start_frame][:, strong_relative_joints - 1].detach())**2) + torch.sum((left_hand_pose_p1 - origin_smplx_params["person1"]["left_hand_pose"][ids-start_frame].detach())**2) + torch.sum((right_hand_pose_p1 - origin_smplx_params["person1"]["right_hand_pose"][ids-start_frame].detach())**2)
            local_reality_loss_p2 = torch.sum((body_pose_p2[:, strong_relative_joints - 1] - origin_smplx_params["person2"]["body_pose"][ids-start_frame][:, strong_relative_joints - 1].detach())**2) + torch.sum((left_hand_pose_p2 - origin_smplx_params["person2"]["left_hand_pose"][ids-start_frame].detach())**2) + torch.sum((right_hand_pose_p2 - origin_smplx_params["person2"]["right_hand_pose"][ids-start_frame].detach())**2)
            arm_loss_p1 = torch.sum((body_pose_p1[:, arm_joints] - origin_smplx_params["person1"]["body_pose"][ids-start_frame][:, arm_joints].detach())**2)
            arm_loss_p2 = torch.sum((body_pose_p2[:, arm_joints] - origin_smplx_params["person2"]["body_pose"][ids-start_frame][:, arm_joints].detach())**2)
            local_rotation_reality_loss = local_reality_loss_p1 + local_reality_loss_p2
            arm_loss = arm_loss_p1 + arm_loss_p2
            # print("time for computing local_reality_loss: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()
            # 胳膊上的关节相对胸口关节的平移向量的loss  TODO: 应该换成L2
            strong_relative_vector_p1 = joints_p1[:, strong_relative_joints] - joints_p1[:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            gt_relative_vector_p1 = origin_smplx_params["person1"]["joints"][ids-start_frame][:, strong_relative_joints] - origin_smplx_params["person1"]["joints"][ids-start_frame][:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            local_relative_reality_loss_p1 = torch.sum((strong_relative_vector_p1 - gt_relative_vector_p1)**2)
            strong_relative_vector_p2 = joints_p2[:, strong_relative_joints] - joints_p2[:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            gt_relative_vector_p2 = origin_smplx_params["person2"]["joints"][ids-start_frame][:, strong_relative_joints] - origin_smplx_params["person2"]["joints"][ids-start_frame][:, 9].unsqueeze(1).repeat(1, len(strong_relative_joints), 1)
            local_relative_reality_loss_p2 = torch.sum((strong_relative_vector_p2 - gt_relative_vector_p2)**2)
            local_relative_reality_loss = local_relative_reality_loss_p1 + local_relative_reality_loss_p2
            # local_relative_reality_loss = torch.tensor(0.0)
            # print("time for computing local_relative_reality_loss: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()
            # 除了胳膊之外的关节相对root的平移向量的loss
            # global_reality_loss_p1 = torch.sum(torch.abs(joints_p1[:, weak_relative_joints] - origin_smplx_params["person1"]["joints"][ids][:, weak_relative_joints].detach()))
            # global_reality_loss_p2 = torch.sum(torch.abs(joints_p2[:, weak_relative_joints] - origin_smplx_params["person2"]["joints"][ids][:, weak_relative_joints].detach()))
            weak_relative_vector_p1 = joints_p1[:, weak_relative_joints] - joints_p1[:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            gt_relative_vector_p1 = origin_smplx_params["person1"]["joints"][ids-start_frame][:, weak_relative_joints] - origin_smplx_params["person1"]["joints"][ids-start_frame][:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            global_relative_reality_loss_p1 = torch.sum((weak_relative_vector_p1 - gt_relative_vector_p1)**2)
            weak_relative_vector_p2 = joints_p2[:, weak_relative_joints] - joints_p2[:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            gt_relative_vector_p2 = origin_smplx_params["person2"]["joints"][ids-start_frame][:, weak_relative_joints] - origin_smplx_params["person2"]["joints"][ids-start_frame][:, 0].unsqueeze(1).repeat(1, len(weak_relative_joints), 1)
            global_relative_reality_loss_p2 = torch.sum((weak_relative_vector_p2 - gt_relative_vector_p2)**2)
            global_relative_reality_loss = global_relative_reality_loss_p1 + global_relative_reality_loss_p2
            # global_relative_reality_loss = torch.tensor(0.0)
            # print("time for computing global_relative_reality_loss: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()
            
            # xrf 0927 希望加一个以contact为中心的矫正头部的loss 模拟以contact area为中心
            gt_vector_hta_p1 = origin_smplx_params["person1"]["joints"][ids-start_frame][:, head_joint].repeat(1, len(hand_joints), 1) - origin_smplx_params["person1"]["joints"][ids-start_frame][:, hand_joints].detach()
            hta_p1 = joints_p1[:, head_joint].repeat(1, len(hand_joints), 1) - joints_p1[:, hand_joints]
            hta_loss_p1 = torch.sum((hta_p1 - gt_vector_hta_p1)**2)
            gt_vector_hta_p2 = origin_smplx_params["person2"]["joints"][ids-start_frame][:, head_joint].repeat(1, len(hand_joints), 1) - origin_smplx_params["person2"]["joints"][ids-start_frame][:, hand_joints].detach()
            hta_p2 = joints_p2[:, head_joint].repeat(1, len(hand_joints), 1) - joints_p2[:, hand_joints]
            hta_loss_p2 = torch.sum((hta_p2 - gt_vector_hta_p2)**2)
            hta_loss = hta_loss_p1 + hta_loss_p2
            # hta_loss = torch.tensor(0.0)


            v_loss_p1 = torch.sum(torch.abs(transl_p1[1:] - transl_p1[:-1] - (origin_smplx_params["person1"]["transl"][i-start_frame+1:i-start_frame+L].detach() - origin_smplx_params["person1"]["transl"][i-start_frame:i-start_frame+L-1].detach())))
            v_loss_p2 = torch.sum(torch.abs(transl_p2[1:] - transl_p2[:-1] - (origin_smplx_params["person2"]["transl"][i-start_frame+1:i-start_frame+L].detach() - origin_smplx_params["person2"]["transl"][i-start_frame:i-start_frame+L-1].detach())))
            if (i > start_frame) and (L >= 2):
                v_loss_to_init_p1 = torch.sum(torch.abs(transl_p1[0] - result_smplx_params[-1]["person1"]["transl"][0] - (origin_smplx_params["person1"]["transl"][i-start_frame].detach() - origin_smplx_params["person1"]["transl"][i-start_frame-1].detach()))) * (M / 5)
                v_loss_to_init_p2 = torch.sum(torch.abs(transl_p2[0] - result_smplx_params[-1]["person2"]["transl"][0] - (origin_smplx_params["person2"]["transl"][i-start_frame].detach() - origin_smplx_params["person2"]["transl"][i-start_frame-1].detach()))) * (M / 5)
            else:
                v_loss_to_init_p1 = 0.0
                v_loss_to_init_p2 = 0.0
            # print("time for computing v_loss: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()
            # feet loss
           
            # 判断脚是否在地上 如果在地上就对所有的脚部关节的速度进行惩罚
            feet_loss_p1 = torch.sum((joints_p1[:, feet_ground_joints - 5, 1] - origin_smplx_params["person1"]["joints"][ids-start_frame][:, feet_ground_joints, 1].detach())**2)
            feet_loss_p2 = torch.sum((joints_p2[:, feet_ground_joints - 5, 1] - origin_smplx_params["person2"]["joints"][ids-start_frame][:, feet_ground_joints, 1].detach())**2)
            
            # print("time for computing feet_loss: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()
            
            # print(((joints_p1[:, left_feet_ground_joints, 1].clamp(0.05, None) * torch.ge(origin_smplx_params["person1"]["joints"][ids-start_frame][:, left_feet_ground_joints, 1], 0.05))**2))
            # 如果脚在地上就对脚部的y进行约束
            # feet_loss_p1 = torch.sum((joints_p1[:, left_feet_ground_joints - 5, 1].clamp(0.0, None) * torch.ge(origin_smplx_params["person1"]["joints"][ids-start_frame][:, left_feet_ground_joints, 1], 0.05))**2) + torch.sum((joints_p1[:, right_feet_ground_joints - 5, 1].clamp(0.0, None) * torch.ge(origin_smplx_params["person1"]["joints"][ids-start_frame][:, right_feet_ground_joints, 1], 0.05))**2)
            # feet_loss_p2 = torch.sum((joints_p2[:, left_feet_ground_joints - 5, 1].clamp(0.0, None) * torch.ge(origin_smplx_params["person2"]["joints"][ids-start_frame][:, left_feet_ground_joints, 1], 0.05))**2) + torch.sum((joints_p2[:, right_feet_ground_joints - 5, 1].clamp(0.0, None) * torch.ge(origin_smplx_params["person2"]["joints"][ids-start_frame][:, right_feet_ground_joints, 1], 0.05))**2)

            # feet_v_loss_p1 = torch.sum((((joints_p1[1:, left_foot_joints - 5].index_select(2, x_z) - joints_p1[:-1, left_foot_joints - 5].index_select(2, x_z)) * p1_left_if_onground[:-1]) - ((origin_smplx_params["person1"]["joints"][i-start_frame+1:i-start_frame+L, left_foot_joints].index_select(2, x_z).detach() - origin_smplx_params["person1"]["joints"][i-start_frame:i-start_frame+L-1, left_foot_joints].index_select(2, x_z).detach()) * p1_left_if_onground[:-1]))**2) + torch.sum((((joints_p1[1:, right_foot_joints - 5].index_select(2, x_z) - joints_p1[:-1, right_foot_joints - 5].index_select(2, x_z)) * p1_right_if_onground[:-1]) - ((origin_smplx_params["person1"]["joints"][i-start_frame+1:i-start_frame+L, right_foot_joints].index_select(2, x_z).detach() - origin_smplx_params["person1"]["joints"][i-start_frame:i-start_frame+L-1, right_foot_joints].index_select(2, x_z).detach()) * p1_right_if_onground[:-1]))**2)
            # feet_v_loss_p2 = torch.sum((((joints_p2[1:, left_foot_joints - 5].index_select(2, x_z) - joints_p2[:-1, left_foot_joints - 5].index_select(2, x_z)) * p2_left_if_onground[:-1]) - ((origin_smplx_params["person2"]["joints"][i-start_frame+1:i-start_frame+L, left_foot_joints].index_select(2, x_z).detach() - origin_smplx_params["person2"]["joints"][i-start_frame:i-start_frame+L-1, left_foot_joints].index_select(2, x_z).detach()) * p2_left_if_onground[:-1]))**2) + torch.sum((((joints_p2[1:, right_foot_joints - 5].index_select(2, x_z) - joints_p2[:-1, right_foot_joints - 5].index_select(2, x_z)) * p2_right_if_onground[:-1]) - ((origin_smplx_params["person2"]["joints"][i-start_frame+1:i-start_frame+L, right_foot_joints].index_select(2, x_z).detach() - origin_smplx_params["person2"]["joints"][i-start_frame:i-start_frame+L-1, right_foot_joints].index_select(2, x_z).detach()) * p2_right_if_onground[:-1]))**2)
            
            # 正常人走路速度1.1-1.5m/s 10帧一秒 0.11-0.15m/帧 这个速度的3倍以上认为不合理
            # feet_v_loss_p1 = torch.sum(torch.abs((joints_p1[1:, left_foot_joints, 0] - joints_p1[:-1, left_foot_joints, 0]) * p1_left_if_onground[:-1]).clamp(0.4, None))
            # feet_loss_p1, feet_loss_p2 = torch.tensor(0.0), torch.tensor(0.0)
            # xrf trash code
            # dist = torch.nn.PairwiseDistance(p=2)
            # if torch.mean(origin_smplx_params["person1"]["joints"][ids-start_frame][:, feet_joints, 1]) < 0.3:
            #     # print("person 1 feet on ground")
            #     feet_loss_p1 = torch.sum(torch.abs(dist(joints_p1[1:, feet_joints, 0] - joints_p1[:-1, feet_joints, 0], joints_p1[1:, feet_joints, 2] - joints_p1[:-1, feet_joints, 2]) - dist(origin_smplx_params["person1"]["joints"][i-start_frame+1:i-start_frame+L][:, feet_joints, 0] - origin_smplx_params["person1"]["joints"][i-start_frame:i-start_frame+L-1][:, feet_joints, 0], origin_smplx_params["person1"]["joints"][i-start_frame+1:i-start_frame+L][:, feet_joints, 2] - origin_smplx_params["person1"]["joints"][i-start_frame:i-start_frame+L-1][:, feet_joints, 2])))
            # if torch.mean(origin_smplx_params["person2"]["joints"][ids-start_frame][:, feet_joints, 1]) < 0.3:
            #     # print("person 2 feet on ground")
            #     feet_loss_p2 = torch.sum(torch.abs(dist(joints_p2[1:, feet_joints, 0] - joints_p2[:-1, feet_joints, 0], joints_p2[1:, feet_joints, 2] - joints_p2[:-1, feet_joints, 2]) - dist(origin_smplx_params["person2"]["joints"][i-start_frame+1:i-start_frame+L][:, feet_joints, 0] - origin_smplx_params["person2"]["joints"][i-start_frame:i-start_frame+L-1][:, feet_joints, 0], origin_smplx_params["person2"]["joints"][i-start_frame+1:i-start_frame+L][:, feet_joints, 2] - origin_smplx_params["person2"]["joints"][i-start_frame:i-start_frame+L-1][:, feet_joints, 2])))
            
            # TODO batch smoothness loss
            batch_smoothness_loss = torch.tensor(0.0)
            # if init_smplx_params is not None:
            #     batch_smoothness_loss_p1 = torch.sum(torch.abs(body_pose_p1[0] - init_smplx_params["person1"]["body_pose"].detach())) + torch.sum(torch.abs(left_hand_pose_p1[0] - init_smplx_params["person1"]["left_hand_pose"].detach())) + torch.sum(torch.abs(right_hand_pose_p1[0] - init_smplx_params["person1"]["right_hand_pose"].detach()))
            #     batch_smoothness_loss_p2 = torch.sum(torch.abs(body_pose_p2[0] - init_smplx_params["person2"]["body_pose"].detach())) + torch.sum(torch.abs(left_hand_pose_p2[0] - init_smplx_params["person2"]["left_hand_pose"].detach())) + torch.sum(torch.abs(right_hand_pose_p2[0] - init_smplx_params["person2"]["right_hand_pose"].detach()))
            #     batch_smoothness_loss = batch_smoothness_loss_p1 + batch_smoothness_loss_p2

            # liuyun:
            # new obj vertices
            new_obj_vertices = retarget_obj_vert_seq[ids-start_frame]  # (L, N_obj, 3)
            # print(retarget_obj_vert_seq.shape)
            # contact_p1_loss = naive_contact_loss(origin_contact_info["person1"], ids-start_frame, vert_p1, new_obj_vertices)  # person 1 contact loss
            # contact_p2_loss = naive_contact_loss(origin_contact_info["person2"], ids-start_frame, vert_p2, new_obj_vertices)  # person 2 contact loss
            # print("time for computing contact_loss: {}s".format(str(time.perf_counter() - time_1)))
            # time_1 = time.perf_counter()

            # xrf 
            # 已经有了手部的vertex idx，我们可以得到closest point的dist 然后joints对在retargeting之后obj上的同样的点计算dist loss
            contact_p1_loss = naive_joint_contact_loss(origin_contact_info["person1"], ids-start_frame, joints_p1, new_obj_vertices, origin_smplx_params["person1"]["joints"])  # person 1 contact loss
            contact_p2_loss = naive_joint_contact_loss(origin_contact_info["person2"], ids-start_frame, joints_p2, new_obj_vertices, origin_smplx_params["person2"]["joints"])


            regularizer = regularizer_p1 + regularizer_p2
            smoothness = smoothness_p1 + smoothness_p2  + smoothness_to_init_p1 + smoothness_to_init_p2
            contact_loss = contact_p1_loss + contact_p2_loss
            # contact_loss = torch.tensor(0.0)
            v_loss = v_loss_p1 + v_loss_p2 + v_loss_to_init_p1 + v_loss_to_init_p2
            feet_loss = feet_loss_p1 + feet_loss_p2
            # feet_loss = feet_v_loss_p1 + feet_v_loss_p2 + feet_loss_p1 + feet_loss_p2

            loss = lambda_regularizer * regularizer + lambda_smoothness * smoothness \
                + lambda_reality[0] * local_rotation_reality_loss \
                + lambda_reality[1] * local_relative_reality_loss + lambda_reality[2] * global_relative_reality_loss \
                + lambda_contact * contact_loss + lambda_v * v_loss + lambda_feet * feet_loss + lambda_arm_loss * arm_loss + 1e-2 * hta_loss
            if epoch % 100 == 0 or (epoch == N_epoch - 1):
                print(epoch, loss.item(), lambda_regularizer * regularizer.item(), lambda_smoothness * smoothness.item(), \
                    lambda_reality[0] * local_rotation_reality_loss.item(), \
                    lambda_reality[1] * local_relative_reality_loss.item(), lambda_reality[2] * global_relative_reality_loss.item(), \
                    lambda_contact * contact_loss.item(), lambda_v * v_loss.item(), lambda_feet * feet_loss.item(), lambda_arm_loss * arm_loss.item(), 1e-2 * hta_loss.item())
            loss_rec.append(loss.item())
            loss.backward()
            optimizer.step()
            if loss.item() < 0.5 and epoch > 1000:
                print("loss < 0.5, break")
                break
        
        optim_model.eval()
        results = optim_model()

        epoch_end_time = time.perf_counter()
        print("time for one epoch: {}s".format(str(epoch_end_time - epoch_start_time)))
        # draw
        # plt.plot(loss_rec)
        # plt.savefig(join(save_dir, f"{i}-{i+L-1}_adam.jpg"))

        for j in range(L):
            result_smplx_params.append({
                "person1": {
                    "betas": results["person1"]["betas"][j:j+1].detach(),
                    "expression": results["person1"]["expression"][j:j+1].detach(),
                    "global_orient": results["person1"]["global_orient"][j:j+1].detach(),
                    "transl": results["person1"]["transl"][j:j+1].detach(),
                    "body_pose": results["person1"]["body_pose"][j:j+1].detach(),
                    "left_hand_pose": results["person1"]["left_hand_pose"][j:j+1].detach(),
                    "right_hand_pose": results["person1"]["right_hand_pose"][j:j+1].detach(),
                },
                "person2": {
                    "betas": results["person2"]["betas"][j:j+1].detach(),
                    "expression": results["person2"]["expression"][j:j+1].detach(),
                    "global_orient": results["person2"]["global_orient"][j:j+1].detach(),
                    "transl": results["person2"]["transl"][j:j+1].detach(),
                    "body_pose": results["person2"]["body_pose"][j:j+1].detach(),
                    "left_hand_pose": results["person2"]["left_hand_pose"][j:j+1].detach(),
                    "right_hand_pose": results["person2"]["right_hand_pose"][j:j+1].detach(),
                },
            })
            # print("p1 left height", results["person1"]["joints"][j:j+1, 57, 1].detach().item(), "p1 right height", results["person1"]["joints"][j:j+1, 60, 1].detach().item())
            # print("origin p1 left height", origin_smplx_params["person1"]["joints"][(ids-start_frame)[0]+j:(ids-start_frame)[0]+j+1, 62, 1].detach().item(), "origin p1 right height", origin_smplx_params["person1"]["joints"][(ids-start_frame)[0]+j:(ids-start_frame)[0]+j+1, 65, 1].detach().item())
            
        init_smplx_params = {
            "person1": {
                "betas": results["person1"]["betas"][-1:].detach(),
                "expression": results["person1"]["expression"][-1:].detach(),
                "global_orient": results["person1"]["global_orient"][-1:].detach(),
                "transl": results["person1"]["transl"][-1:].detach(),
                "body_pose": results["person1"]["body_pose"][-1:].detach(),
                "left_hand_pose": results["person1"]["left_hand_pose"][-1:].detach(),
                "right_hand_pose": results["person1"]["right_hand_pose"][-1:].detach(),
            },
            "person2": {
                "betas": results["person2"]["betas"][-1:].detach(),
                "expression": results["person2"]["expression"][-1:].detach(),
                "global_orient": results["person2"]["global_orient"][-1:].detach(),
                "transl": results["person2"]["transl"][-1:].detach(),
                "body_pose": results["person2"]["body_pose"][-1:].detach(),
                "left_hand_pose": results["person2"]["left_hand_pose"][-1:].detach(),
                "right_hand_pose": results["person2"]["right_hand_pose"][-1:].detach(),
            }
        }

            
    print(len(result_smplx_params))
    # (4) save and visualization
    # np.savez(join(save_dir, "person1_SMPLX_params.npz"), results=HOI_retargeting_result["person1_SMPLX_params"])
    # np.savez(join(save_dir, "person2_SMPLX_params.npz"), results=HOI_retargeting_result["person2_SMPLX_params"])
    # TODO: visualization
    return result_smplx_params


def parse_args():
    """Parse command-line options for object / HOI retargeting.

    Returns:
        argparse.Namespace with the sequence directory, frame range, device,
        per-axis object scale factors, new-object options and output filenames.
    """
    ap = argparse.ArgumentParser()
    ############# data selection ########################
    # Other example sequences (kept for reference):
    #   /share/datasets/HHO_dataset/data/20230807_2/002  -- carrying a table
    #   /share/datasets/HHO_dataset/data/20230807_2/004  -- rotating a table
    #   /share/datasets/HHO_dataset/data/20230805_1/002  -- rotating a chair
    ap.add_argument('--data_dir', type=str, default="/share/datasets/HHO_dataset/data/20230807_1/017")  # carrying a stick
    ap.add_argument('--start_frame', '-s', type=int, default=0)
    ap.add_argument('--end_frame', '-e', type=int, default=300)
    ap.add_argument('--save_filename', type=str, default="compare_result.mp4")
    ap.add_argument('--device', type=str, default="cuda:0")
    # Per-axis scale factors applied to the original object mesh.
    ap.add_argument('--scale_x', type=float, default=1.0)
    ap.add_argument('--scale_y', type=float, default=1.0)
    ap.add_argument('--scale_z', type=float, default=1.0)
    ap.add_argument('--use_new_obj', action="store_true")  # if set, retarget onto a different object
    ap.add_argument('--new_obj_id', type=str, default="")
    ap.add_argument('--force_retarget_obj', action="store_true")  # if set, redo object retargeting even if cached
    ap.add_argument('--objpose_filename', type=str, default="obj_poses.npy")
    ap.add_argument('--objmesh_filename', type=str, default="new_obj.obj")
    ap.add_argument('--npy_save_filename', type=str, default="HOI_retargeting_result.npy")
    return ap.parse_args()


if __name__ == '__main__':

    #########################################################################################
    # Root directory holding the object mesh dataset (resolved by object name).
    obj_dataset_dir = "/share/datasets/HHO_object_dataset_final"
    # Which persons to retarget.
    cfg = {
        "retarget_person1": True,
        "retarget_person2": True,
    }
    #########################################################################################

    args = parse_args()

    data_dir = args.data_dir
    device = args.device
    start_frame, end_frame = args.start_frame, args.end_frame

    # Clamp end_frame to the number of available aligned object poses.
    # (Load the .npy once; the original loaded it twice.)
    aligned_objposes = np.load(join(data_dir, 'aligned_objposes.npy'), allow_pickle=True)
    if len(aligned_objposes) < end_frame:
        end_frame = len(aligned_objposes)
        print("Warning: end_frame is larger than the length of object_result, set end_frame to {}".format(str(end_frame)))

    print(data_dir, "[", start_frame, end_frame, ")")

    obj_retargeting_save_dir = join(data_dir, "obj_retargeting")
    HOI_retargeting_save_dir = join(data_dir, "HOI_retargeting")

    result_save_dir = join(HOI_retargeting_save_dir, args.npy_save_filename)

    objpose_save_file = join(obj_retargeting_save_dir, args.objpose_filename)
    objmesh_save_file = join(obj_retargeting_save_dir, args.objmesh_filename)

    obj_name, obj_data_path = get_obj_info(data_dir, obj_dataset_dir)
    print(obj_name, obj_data_path)
    assert isfile(obj_data_path)

    # Object retargeting runs when forced or when no cached poses exist.
    need_obj_retarget = args.force_retarget_obj or (not isfile(objpose_save_file))

    # Branch 1: retarget by per-axis scaling of the original object.
    # BUGFIX: the original condition was
    #   args.force_retarget_obj or (not isfile(objpose_save_file)) and not args.use_new_obj
    # which Python parses as `a or (b and c)` because `and` binds tighter than
    # `or`; with --force_retarget_obj and --use_new_obj both set, this branch
    # ran IN ADDITION to the new-object branch below and overwrote its output
    # files.  Parenthesized to the intended `(a or b) and c`, mirroring the
    # new-object branch.
    if need_obj_retarget and not args.use_new_obj:
        q_mat = torch.from_numpy(np.array([[args.scale_x, 0, 0, 0], [0, args.scale_y, 0, 0], [0, 0, args.scale_z, 0], [0, 0, 0, 1]]))
        print("q_mat", q_mat)
        os.makedirs(obj_retargeting_save_dir, exist_ok=True)
        obj_retargeting_result = obj_retargeting(data_dir, obj_data_path, q_mat, start_frame=start_frame, end_frame=end_frame, device=device)
        save_mesh(objmesh_save_file, obj_retargeting_result["obj_mesh"])
        np.save(objpose_save_file, obj_retargeting_result["obj_poses"])

    # Branch 2: retarget onto a different target object (--use_new_obj).
    if need_obj_retarget and args.use_new_obj:
        os.makedirs(obj_retargeting_save_dir, exist_ok=True)
        target_data_path = get_obj_path(args.new_obj_id)
        obj_retargeting_result = obj_retargeting_new_obj(data_dir, obj_data_path, target_data_path, start_frame=start_frame, end_frame=end_frame, device=device)
        save_mesh(objmesh_save_file, obj_retargeting_result["obj_mesh"])
        np.save(objpose_save_file, obj_retargeting_result["obj_poses"])

    # obj_visualization(objmesh_save_file, objpose_save_file, data_dir, start_frame, end_frame, device="cuda:0")

    origin_mesh = trimesh.load_mesh(obj_data_path)
    obj_mesh = trimesh.load_mesh(objmesh_save_file)
    obj_poses = np.load(objpose_save_file, allow_pickle=True)

    # HOI retargeting: optimize the two persons' SMPL-X sequences against the
    # retargeted object poses, then persist the result.
    HOI_retargeting_result = HOI_retargeting(data_dir, HOI_retargeting_save_dir, origin_mesh, obj_mesh, obj_poses, start_frame=start_frame, end_frame=end_frame, cfg=cfg, device=device, use_new_obj=args.use_new_obj)
    np.save(result_save_dir, HOI_retargeting_result)

    # visualization
    # HOI_retargeting_result = np.load(join(HOI_retargeting_save_dir, "HOI_retargeting_result.npy"), allow_pickle=True)
    HOI_visualization(result_save_dir, objpose_save_file, objmesh_save_file, obj_dataset_dir, data_dir, data_dir, start_frame, end_frame, save_filename=args.save_filename, device="cuda:0")