import os
from os.path import join, dirname, isdir, isfile
import numpy as np
import torch
import json
import trimesh
from transforms3d.axangles import mat2axangle
from scipy.spatial.transform  import Rotation as sRot
from UniHSI_utils.load_data import load_unihsi_date_one_sequence
from mesh import merge_mesh, save_mesh
from h1_kinematics import H1_Motion_Model, load_urdf, get_h1_link_names, forward_kinematics, save_predicted_h1_motion
from utils.rotation import rot_decompose
import open3d as o3d


def get_joint_position_correspondence():
    """
    Return the joint-position correspondence table between H1 and UniHSI.

    Each entry is
    [h1 joint index, corresponding UniHSI joint index, father index of the H1 joint on the abstract skeleton, father index of the UniHSI joint on the abstract skeleton].
    A father index of -1 marks a root entry with no parent bone.
    """
    # H1 joints 3, 8, 12, 14, 16 and 18 are deliberately left unmapped.
    # NOTE(review): row (10, 11, 9, 11) lists UniHSI joint 11 as its own parent,
    # unlike the sibling row (9, 10, 7, 9) — confirm this is intended.
    table = (
        (0, 0, -1, -1),
        (1, 12, 0, 0),
        (2, 12, 0, 0),
        (4, 13, 2, 12),
        (5, 14, 4, 13),
        (6, 9, 0, 0),
        (7, 9, 0, 0),
        (9, 10, 7, 9),
        (10, 11, 9, 11),
        (11, 0, -1, -1),
        (13, 6, 11, 0),
        (15, 7, 13, 6),
        (17, 3, 11, 0),
        (19, 4, 17, 3),
    )
    return [list(entry) for entry in table]


def get_joint_orientation_correspondence():
    """
    Return the joint-orientation correspondence table between H1 and UniHSI.

    Each entry is [h1 joint index, corresponding UniHSI joint index]; only a
    subset of the H1 joints has its orientation supervised (pelvis, knees,
    ankles per the position table's indices).
    """
    h1_indices = (0, 4, 5, 9, 10)
    unihsi_indices = (0, 13, 14, 10, 11)
    return [list(pair) for pair in zip(h1_indices, unihsi_indices)]


def preprocess_gt_info_unihsi(unihsi_motion, device="cuda:0"):
    """
    Split UniHSI world-space link poses into joint positions and orientations.

    Args:
        unihsi_motion: np.float32 array of shape (N, 15, 4, 4), world-space
            pose of each link in each frame.
        device: torch device the returned tensors live on.

    Returns:
        (N_frame, positions (N, 15, 3), orientations (N, 15, 3, 3)).
    """
    n_frames = unihsi_motion.shape[0]

    positions = torch.from_numpy(unihsi_motion[:, :, :3, 3]).to(device)  # (N, 15, 3)

    # Basis change from H1 to UniHSI convention — currently the identity, kept
    # as an explicit right-multiplication so a real correction can be dropped in.
    h1_to_unihsi = torch.eye(3, dtype=torch.float32, device=device)
    orientations = torch.einsum(
        'bcij,bcjk->bcik',
        torch.from_numpy(unihsi_motion[:, :, :3, :3]).to(device),
        h1_to_unihsi.reshape(1, 1, 3, 3),
    )  # (N, 15, 3, 3)

    # Shift the pelvis origin by 0.20 along its own local z-axis.
    pelvis_offset = torch.tensor([0.0, 0.0, 0.20], dtype=torch.float32, device=device)
    positions[:, 0] += torch.matmul(orientations[:, 0], pelvis_offset.reshape(3, 1)).reshape(n_frames, 3)

    return n_frames, positions, orientations


def align_bone_length_to_H1(N_frame, gt_joint_positions, gt_joint_orientations):
    """
    Rescale each UniHSI bone to the H1 robot's bone length, keeping bone directions.

    For every corresponding bone, the child joint is translated along the original
    bone direction so the bone matches the H1 length; the per-joint offsets are then
    accumulated down the kinematic chain so descendants follow their ancestors.

    Args:
        N_frame: number of frames.
        gt_joint_positions: (N, 15, 3) tensor of UniHSI joint positions.
        gt_joint_orientations: (N, 15, 3, 3) tensor, passed through unchanged.

    Returns:
        (N_frame, aligned positions (N, 15, 3) on the input device, gt_joint_orientations).
    """
    # prepare info
    joint_correspondence = get_joint_position_correspondence()
    # Reference H1 link poses in a canonical pose; only the translations are used.
    H1_link_position_example = np.load("./utils/20link_pose_example.npy")[:, :3, 3]  # (20, 3)

    device = gt_joint_positions.device
    gt_joint_positions = gt_joint_positions.detach().cpu()

    aligned_gt_joint_positions = torch.zeros(gt_joint_positions.shape)
    for frame_idx in range(N_frame):
        gt = gt_joint_positions[frame_idx]  # (15, 3)
        delta = torch.zeros(gt.shape)
        # get the bias for each joint: how far the child joint must move along the
        # current bone direction for the bone to match the H1 bone length
        for joint_corr in joint_correspondence:
            curr_h1, curr_human, parent_h1, parent_human = joint_corr
            if parent_human == -1:
                continue  # root entries have no parent bone to rescale
            h1_bone_length = ((H1_link_position_example[curr_h1] - H1_link_position_example[parent_h1])**2).sum()**0.5
            curr_human_bone = gt[curr_human] - gt[parent_human]
            # clamp guards against division by zero for degenerate (zero-length) bones
            delta[curr_human] = h1_bone_length * (curr_human_bone / torch.clamp(curr_human_bone.norm(p=None), 1e-6, None)) - curr_human_bone
        # accumulate the bias down the chain (rows are assumed ordered parent-first)
        # NOTE(review): correspondence row [10, 11, 9, 11] makes UniHSI joint 11 its
        # own parent here, so delta[11] is doubled — confirm this is intentional.
        for joint_corr in joint_correspondence:
            _, curr_human, _, parent_human = joint_corr
            if parent_human == -1:
                continue
            delta[curr_human] += delta[parent_human]
        # joints 5 and 8 (printed as "hand positions" by the caller) have no
        # correspondence rows of their own; they inherit offsets from joints 4 and 7
        # — presumably 4->5 and 7->8 are parent->child; verify against the skeleton
        delta[8] += delta[7]
        delta[5] += delta[4]
        # save
        aligned_gt_joint_positions[frame_idx] = gt + delta

    aligned_gt_joint_positions = aligned_gt_joint_positions.to(dtype=gt_joint_positions.dtype).to(device)
    return N_frame, aligned_gt_joint_positions, gt_joint_orientations


def complete_h1_motion(h1_motion, chain, link_names, device="cuda:0"):
    """
    Attach per-link world-space poses to an H1 motion dict.

    Runs forward kinematics with the motion's joint angles and root trajectory,
    converts each link's (N_frame, 4, 4) pose tensor to numpy, and stores the
    result in-place under h1_motion["link_global_poses"].

    Returns the same (mutated) h1_motion dict.
    """
    link_poses = forward_kinematics(
        chain,
        link_names,
        h1_motion["joint_angles"],
        global_rotation=h1_motion["global_rotations"],
        global_translation=h1_motion["global_translations"],
        device=device,
    )
    h1_motion["link_global_poses"] = {
        name: pose.detach().cpu().numpy()  # (N_frame, 4, 4)
        for name, pose in link_poses.items()
    }
    return h1_motion


def optimization_unihsi_to_h1(chain, link_names, unihsi_motion, method_name="normal_optimization", device="cuda:0"):
    """
    Retarget a UniHSI motion onto the H1 robot by Adam optimization of an H1 motion model.

    chain: h1 chain
    link_names: h1 link names
    unihsi_motion: a np.float32 representing the pose of each link in each frame in world space, shape = (N, 15, 4, 4)
    method_name: "normal_optimization", or "omnih2o" to first rescale the UniHSI
        bones to H1 bone lengths (align_bone_length_to_H1) before optimizing

    return: {"joint_angles": (N, 19), "global_rotations": (N, 3), "global_translations": (N, 3),
             "link_global_poses": {link_name: (N, 4, 4)}}  (keys per complete_h1_motion)
    """

    N_frame, gt_joint_positions, gt_joint_orientations = preprocess_gt_info_unihsi(unihsi_motion, device=device)
    if method_name == "omnih2o":
        # joints 5 / 8 are the hand joints; print them to sanity-check the rescaling
        print("hand positions before:", gt_joint_positions[-1, 5], gt_joint_positions[-1, 8])
        N_frame, gt_joint_positions, gt_joint_orientations = align_bone_length_to_H1(N_frame, gt_joint_positions, gt_joint_orientations)
        print("hand positions after:", gt_joint_positions[-1, 5], gt_joint_positions[-1, 8])

    ########################## start optimization #################################
    # initialize the root trajectory from the ground-truth pelvis pose
    init_global_translations = gt_joint_positions[:, 0].clone()  # (N, 3)
    init_global_rotations = []
    for i in range(N_frame):
        # convert each pelvis rotation matrix to an axis-angle vector (axis * angle)
        axis, angle = mat2axangle(gt_joint_orientations[i, 0].detach().cpu().numpy(), unit_thresh=1e-3)
        init_global_rotations.append(axis * angle)
    init_global_rotations = torch.from_numpy(np.float32(init_global_rotations)).to(device)  # (N, 3)
    h1_motion_model = H1_Motion_Model(batch_size=N_frame, init_global_translations=init_global_translations, init_global_rotations=init_global_rotations, device=device)

    optimizer = torch.optim.Adam(h1_motion_model.parameters(), lr=2e-2)
    h1_motion_model.train()

    joint_position_corrs = get_joint_position_correspondence()
    joint_orientation_corrs = get_joint_orientation_correspondence()

    for epoch in range(1000):
        h1_motion = h1_motion_model()

        pred_link_to_world_dict = forward_kinematics(chain, link_names, h1_motion["joint_angles"], global_rotation=h1_motion["global_rotations"], global_translation=h1_motion["global_translations"], device=device)

        # position loss: squared distance between predicted H1 link positions and
        # the corresponding UniHSI joint positions
        joint_global_position_loss = 0
        for joint_corr in joint_position_corrs:
            # weight pelvis and ankle links more heavily to anchor the root and feet
            if ("pelvis" in link_names[joint_corr[0]]) or ("ankle" in link_names[joint_corr[0]]):
                scale = 5.0
            else:
                scale = 1.0
            joint_global_position_loss += scale * ((pred_link_to_world_dict[link_names[joint_corr[0]]][:, :3, 3] - gt_joint_positions[:, joint_corr[1]])**2).sum(dim=-1).mean()
        
        # smoothness terms via finite differences across frames
        pred_joint_angles = h1_motion["joint_angles"]
        pred_joint_velocities = pred_joint_angles[1:] - pred_joint_angles[:-1]
        pred_joint_accelerations = pred_joint_velocities[1:] - pred_joint_velocities[:-1]
        pred_root_linear_velocities = pred_link_to_world_dict["pelvis"][1:, :3, 3] - pred_link_to_world_dict["pelvis"][:-1, :3, 3]
        pred_root_linear_acceleration = pred_root_linear_velocities[1:] - pred_root_linear_velocities[:-1]
        # NOTE(review): joint_local_velocity_loss and root_global_linear_acceleration_loss
        # are computed but never added to the total loss below — confirm this is intended.
        joint_local_velocity_loss = pred_joint_velocities.abs().sum(dim=-1).mean()
        joint_local_acceleration_loss = pred_joint_accelerations.abs().sum(dim=-1).mean()
        root_global_linear_acceleration_loss = (pred_root_linear_acceleration**2).sum(dim=-1).mean()

        # joint global rotation loss
        joint_global_orientation_loss = 0
        for joint_corr in joint_orientation_corrs:
            pred_R = pred_link_to_world_dict[link_names[joint_corr[0]]][:, :3, :3]  # (N, 3, 3)
            gt_R = gt_joint_orientations[:, joint_corr[1]]  # (N, 3, 3)
            # element-wise L1 between the rotation matrices
            joint_global_orientation_loss += (pred_R - gt_R).abs().sum(dim=-1).sum(dim=-1).mean()

        # TODO: add contact loss

        loss = 1.0 * joint_global_position_loss + 0.1 * joint_local_acceleration_loss + 1.0 * joint_global_orientation_loss

        if epoch % 100 == 0:
            print(epoch, loss.item(), joint_global_position_loss.item(), joint_local_acceleration_loss.item(), joint_global_orientation_loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    
    ########################## finish optimization ################################

    # run the model once more for the final parameters, then attach link poses
    h1_motion = h1_motion_model()
    h1_motion = complete_h1_motion(h1_motion, chain, link_names, device=device)
    return h1_motion


def copy_rotation_unihsi_to_h1(chain, link_names, unihsi_motion, device="cuda:0"):
    """
    Retarget a UniHSI motion onto H1 by converting relative link rotations directly
    into H1 joint angles (analytic, no optimization).

    chain: h1 chain
    link_names: h1 link names
    unihsi_motion: a np.float32 representing the pose of each link in each frame in world space, shape = (N, 15, 4, 4)

    return: {"joint_angles": (N, 19), "global_rotations": (N, 3), "global_translations": (N, 3),
             "link_global_poses": {link_name: (N, 4, 4)}}  (keys per complete_h1_motion)
    """

    N_frame, gt_joint_positions, gt_joint_orientations = preprocess_gt_info_unihsi(unihsi_motion, device=device)

    ########################## start copy rotation #################################
    # root trajectory comes straight from the UniHSI pelvis (joint 0)
    global_translations = gt_joint_positions[:, 0].clone()  # (N, 3)
    global_rotations = []
    for i in range(N_frame):
        # pelvis rotation matrix -> axis-angle vector (axis * angle)
        axis, angle = mat2axangle(gt_joint_orientations[i, 0].detach().cpu().numpy(), unit_thresh=1e-3)
        global_rotations.append(axis * angle)
    global_rotations = torch.from_numpy(np.float32(global_rotations)).to(device)  # (N, 3)

    # H1 joint-angle layout used below: 0-4 left leg, 5-9 right leg, 10 torso,
    # 11-14 left arm, 15-18 right arm (per the index assignments in this function).
    # UniHSI joint indices per the variable names: 0 pelvis, 1 torso, 3/4 right
    # shoulder/elbow, 6/7 left shoulder/elbow, 9-11 right thigh/knee/ankle,
    # 12-14 left thigh/knee/ankle — TODO confirm against the UniHSI skeleton.
    joint_angles = torch.zeros(N_frame, 19).to(dtype=torch.float32).to(device)
    # thighs: express each thigh rotation relative to the pelvis frame
    R_left_thigh_to_pelvis = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 0].permute(0, 2, 1), gt_joint_orientations[:, 12])
    R_right_thigh_to_pelvis = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 0].permute(0, 2, 1), gt_joint_orientations[:, 9])
    # 'yxz' intrinsic Euler factorization, reversed to (z, x, y) angle order
    euler = torch.from_numpy(sRot.from_matrix(R_left_thigh_to_pelvis.detach().cpu().numpy()).as_euler('yxz'))[:, [2,1,0]]
    joint_angles[:, 0:3] = euler
    euler = torch.from_numpy(sRot.from_matrix(R_right_thigh_to_pelvis.detach().cpu().numpy()).as_euler('yxz'))[:, [2,1,0]]
    joint_angles[:, 5:8] = euler
    # knees: single-DOF, only the first ('y') Euler angle is kept
    R_left_knee_to_thigh = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 12].permute(0, 2, 1), gt_joint_orientations[:, 13])
    R_right_knee_to_thigh = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 9].permute(0, 2, 1), gt_joint_orientations[:, 10])
    euler = torch.from_numpy(sRot.from_matrix(R_left_knee_to_thigh.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 3] = euler
    euler = torch.from_numpy(sRot.from_matrix(R_right_knee_to_thigh.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 8] = euler
    # ankles: likewise single-DOF pitch
    R_left_ankle_to_knee = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 13].permute(0, 2, 1), gt_joint_orientations[:, 14])
    R_right_ankle_to_knee = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 10].permute(0, 2, 1), gt_joint_orientations[:, 11])
    euler = torch.from_numpy(sRot.from_matrix(R_left_ankle_to_knee.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 4] = euler
    euler = torch.from_numpy(sRot.from_matrix(R_right_ankle_to_knee.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 9] = euler
    # torso: single yaw angle relative to the pelvis
    R_torso_to_pelvis = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 0].permute(0, 2, 1), gt_joint_orientations[:, 1])
    euler = torch.from_numpy(sRot.from_matrix(R_torso_to_pelvis.detach().cpu().numpy()).as_euler('zxy'))[:, 0]
    joint_angles[:, 10] = euler
    # shoulders and elbows
    # H1 shoulder-pitch axes are tilted 25 degrees about x from the y-axis,
    # mirrored for the right side
    theta = np.pi / 180 * 25
    left_shoulder_pitch_rot = sRot.from_euler('x', theta)
    left_shoulder_pitch_axis = left_shoulder_pitch_rot.apply([0,1,0])
    right_shoulder_pitch_rot = sRot.from_euler('x', -theta)
    right_shoulder_pitch_axis = right_shoulder_pitch_rot.apply([0,1,0])
    # decompose the relative shoulder rotation along the tilted pitch axis,
    # then the remaining rotation along x (roll) and z (yaw)
    R_left_shoulder_to_torso = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 1].permute(0, 2, 1), gt_joint_orientations[:, 6]).detach().cpu().numpy()
    Ryp_theta, Rxz = rot_decompose(R_left_shoulder_to_torso, left_shoulder_pitch_axis)
    Rx_theta, Rz = rot_decompose(Rxz, np.float64([1,0,0]))
    Rz_theta, _ = rot_decompose(Rz, np.float64([0,0,1]))
    # euler = torch.stack([Ryp_theta, Rx_theta + np.pi / 2, Rz_theta], dim=-1)
    euler = torch.stack([Ryp_theta, Rx_theta, Rz_theta], dim=-1)
    joint_angles[:, 11:14] = euler
    R_right_shoulder_to_torso = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 1].permute(0, 2, 1), gt_joint_orientations[:, 3]).detach().cpu().numpy()
    Ryp_theta, Rxz = rot_decompose(R_right_shoulder_to_torso, right_shoulder_pitch_axis)
    Rx_theta, Rz = rot_decompose(Rxz, np.float64([1,0,0]))
    Rz_theta, _ = rot_decompose(Rz, np.float64([0,0,1]))
    # euler = torch.stack([Ryp_theta, Rx_theta - np.pi / 2, Rz_theta], dim=-1)
    euler = torch.stack([Ryp_theta, Rx_theta, Rz_theta], dim=-1)
    joint_angles[:, 15:18] = euler
    # elbows: split the relative elbow rotation into an elbow angle (about z)
    # plus a residual shoulder-yaw compensation (about x)
    R_left_elbow_to_shoulder = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 6].permute(0, 2, 1), gt_joint_orientations[:, 7]).detach().cpu().numpy()
    left_elbow_angle, left_elbow_R2 = rot_decompose(R_left_elbow_to_shoulder, np.float64([0,0,1]))
    left_shoulder_conpen, _ = rot_decompose(left_elbow_R2, np.float64([1,0,0]))
    joint_angles[:, 13] += left_shoulder_conpen.to(device)
    # NOTE(review): the elbow-angle assignments (indices 14 and 18) are commented
    # out, so both elbow joints stay at zero — confirm this is intentional.
    # joint_angles[:, 14] = left_elbow_angle + np.pi/2
    R_right_elbow_to_shoulder = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 3].permute(0, 2, 1), gt_joint_orientations[:, 4]).detach().cpu().numpy()
    right_elbow_angle, right_elbow_R2 = rot_decompose(R_right_elbow_to_shoulder, np.float64([0,0,-1]))
    right_shoulder_conpen, _ = rot_decompose(right_elbow_R2, np.float64([1,0,0]))
    joint_angles[:, 17] += right_shoulder_conpen.to(device)
    # joint_angles[:, 18] = right_elbow_angle + np.pi/2
    ########################## finish copy rotation ################################

    h1_motion = {
        "joint_angles": joint_angles,
        "global_rotations": global_rotations,
        "global_translations": global_translations,
    }
    h1_motion = complete_h1_motion(h1_motion, chain, link_names, device=device)
    return h1_motion


def retarget_unihsi_to_h1(chain, link_names, unihsi_motion, retargeting_method="optimization", device="cuda:0"):
    """
    Dispatch a UniHSI-to-H1 retargeting run to the selected method.

    Args:
        chain: H1 kinematic chain (from load_urdf).
        link_names: H1 link names matching the correspondence tables.
        unihsi_motion: np.float32 world-space link poses, shape (N, 15, 4, 4).
        retargeting_method: "optimization", "omnih2o", or "copy_rotation".
        device: torch device string.

    Returns:
        The retargeted H1 motion dict produced by the chosen method.

    Raises:
        NotImplementedError: if `retargeting_method` is not one of the known methods.
    """
    if retargeting_method == "optimization":
        return optimization_unihsi_to_h1(chain, link_names, unihsi_motion, device=device)
    elif retargeting_method == "omnih2o":
        # same optimizer, but with bone lengths pre-aligned to H1 first
        return optimization_unihsi_to_h1(chain, link_names, unihsi_motion, method_name="omnih2o", device=device)
    elif retargeting_method == "copy_rotation":
        return copy_rotation_unihsi_to_h1(chain, link_names, unihsi_motion, device=device)
    else:
        # name the offending value so a misconfigured method string is easy to diagnose
        raise NotImplementedError(f"unknown retargeting_method: {retargeting_method!r}")


if __name__ == "__main__":

    ################################ change here ###############################################
    # NOTE: the motion produced here uses the same coordinate frame as the kinematic data;
    # after this script finishes, run align_retargeted_data.py to align the results by object pose
    unihsi_data_root = "/home/liuyun/Humanoid_IL_Benchmark/Kinematic_Data/partnet_UniHSI_chair_AUG_0814"
    flag_augmentation = unihsi_data_root.find("_AUG_") > -1  # supports both the pre- and post-object-augmentation directory layouts
    unihsi_data_type = "sit"  # NOTE: sit / lie
    unihsi_object_type = "chair"
    use_whole_motion = True  # if True, then: walk+sit = sit, walk+sit+lie = lie
    save_dir = "./omnih2o_UniHSI_retargeted_data_augmented_" + unihsi_object_type + "_" + unihsi_data_type + "_notalign"
    shapenet_dir = "/media/liuyun/TOSHIBA EXT/ShapeNet-v2"
    h1_urdf_path = "/home/liuyun/Humanoid_IL_Benchmark/retargeting/assets/h1_description/urdf/h1.urdf"
    retargeting_method = "omnih2o"  # NOTE: optimization / copy_rotation / omnih2o

    start_frame, end_frame = [-150, -1]  # retarget the [start_frame, end_frame] slice of each raw sequence; negative values count from the end
    sampling_rate = 1  # keep one frame out of every `sampling_rate` frames for retargeting

    device = "cuda:0"
    N_clip = 1 # for multi-process parallelism: total number of batches to split the sequences into
    clip_idx = 0  # index of the batch this process handles
    ############################################################################################

    chain = load_urdf(h1_urdf_path, device=device)
    link_names = get_h1_link_names()

    # map each PartNet object id to its ShapeNet mesh path
    partnet_meta_info = json.load(open(join(unihsi_data_root, "partnet_chair_bed.json"), "r"))
    partnet_to_shapenet_dict = {x[0] : join(shapenet_dir, x[2], x[3], "models/model_normalized.obj") for x in partnet_meta_info}

    # collect all demo_motion_* sequence files under the data root
    seq_paths = []
    for scene_name in os.listdir(unihsi_data_root):
        scene_dir = join(unihsi_data_root, scene_name)
        if not isdir(scene_dir):
            continue
        if not flag_augmentation:
            # layout: <root>/<scene>/demo_motion_*
            for fn in os.listdir(scene_dir):
                if not fn.startswith("demo_motion_"):
                    continue
                seq_paths.append(join(scene_dir, fn))
        else:
            # augmented layout: <root>/<scene>/<augmentation>/demo_motion_*
            for aug_name in os.listdir(scene_dir):
                aug_scene_dir = join(scene_dir, aug_name)
                if not isdir(aug_scene_dir):
                    continue
                for fn in os.listdir(aug_scene_dir):
                    if not fn.startswith("demo_motion_"):
                        continue
                    seq_paths.append(join(aug_scene_dir, fn))
    
    # run batch clip_idx out of N_clip batches
    seq_paths.sort()
    assert len(seq_paths) > 0
    N = ((len(seq_paths) - 1) // N_clip) + 1
    seq_paths = seq_paths[N * clip_idx : min(N * (clip_idx+1), len(seq_paths))]

    scene_mesh_dict = {}
    for seq_path in seq_paths:
        # derive a unique scene name from the directory structure
        if not flag_augmentation:
            scene_name = seq_path.split("/")[-2]
        else:
            scene_name = seq_path.split("/")[-3] + "_" + seq_path.split("/")[-2]
        seq_name = seq_path.split("/")[-1].split(".")[0]
        object_meta_info = json.load(open(join(dirname(seq_path), "meta.json"), "r"))
        # NOTE(review): use_whole_motion is hard-coded to True here, shadowing the
        # `use_whole_motion` variable configured above — confirm this is intended.
        seq = load_unihsi_date_one_sequence(seq_path, object_meta_info, partnet_to_shapenet_dict, scene_name=scene_name, scene_mesh_dict=scene_mesh_dict, motion_type=unihsi_data_type, use_whole_motion=True, start_frame=start_frame, end_frame=end_frame, sampling_rate=sampling_rate)
        if seq is None:
            continue
        
        scene_save_dir = join(save_dir, scene_name)
        motion_save_dir = join(scene_save_dir, "h1_kinematic_motions")
        os.makedirs(motion_save_dir, exist_ok=True)
        # write the scene mesh once per scene
        if not isfile(join(scene_save_dir, "scene_mesh.obj")):
            o3d.io.write_triangle_mesh(join(scene_save_dir, "scene_mesh.obj"), scene_mesh_dict[scene_name])

        # # visualization
        # meshes = [scene_mesh]
        # for i in range(15):
        #     coord = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.05)
        #     coord.transform(seq[0, i])
        #     meshes.append(coord)
        # o3d.visualization.draw_geometries(meshes)
        
        h1_motion = retarget_unihsi_to_h1(chain, link_names, seq, retargeting_method=retargeting_method, device=device)
        save_predicted_h1_motion(h1_motion, join(motion_save_dir, scene_name + "_" + seq_name + ".npz"))