import os
from os.path import join, isdir
import numpy as np
import torch
from ROAM_utils.load_data import load_roam_data_overall
import trimesh
from transforms3d.euler import euler2mat
from transforms3d.axangles import mat2axangle
from scipy.spatial.transform  import Rotation as sRot
from mesh import merge_mesh, save_mesh
from h1_kinematics import H1_Motion_Model, load_urdf, get_h1_link_names, forward_kinematics, save_predicted_h1_motion
from utils.rotation import rot_decompose
import open3d as o3d


def get_joint_position_correspondence():
    """
    Position correspondence table between H1 links and ROAM joints.

    Each entry is [h1 joint index, corresponding ROAM joint index, parent of
    the H1 joint on the abstract skeleton, parent of the ROAM joint on the
    abstract skeleton]; a parent of -1 marks a chain root. H1 joints 3, 8,
    13, 14, 17 and 18 have no mapped entry.
    """
    entries = (
        (0, 0, -1, -1),
        (1, 17, 0, 0),
        (2, 17, 0, 0),
        (4, 18, 2, 17),
        (5, 19, 4, 18),
        (6, 22, 0, 0),
        (7, 22, 0, 0),
        (9, 23, 7, 22),
        (10, 24, 9, 23),
        (11, 0, -1, -1),
        (12, 10, 11, 0),
        (15, 11, 12, 10),
        (16, 14, 11, 0),
        (19, 15, 16, 14),
    )
    return [list(entry) for entry in entries]


def get_joint_orientation_correspondence():
    """
    Orientation correspondence table between H1 links and ROAM joints.

    Each entry is [h1 joint index, corresponding ROAM joint index]. Only a
    small subset of the joints has its orientation matched; the remaining
    pairs are deliberately left out.
    """
    entries = (
        (0, 4),
        (4, 18),
        (5, 19),
        (9, 23),
        (10, 24),
    )
    return [list(entry) for entry in entries]


def preprocess_gt_info_roam(roam_motion, device="cuda:0"):
    """
    Split ROAM world-space joint poses into position and orientation tensors.

    roam_motion: np.float32 array of shape (N, 27, 4, 4), per-frame world pose
        of every ROAM joint.
    device: torch device for the returned tensors.

    return: (N_frame, positions (N, 27, 3), orientations (N, 27, 3, 3)), where
        each orientation is post-multiplied by a fixed H1-to-ROAM axis change.
    """
    gt_joint_positions = torch.from_numpy(roam_motion[:, :, :3, 3]).to(device)  # (N, 27, 3)

    N_frame = gt_joint_positions.shape[0]

    # fixed change of basis between the H1 and ROAM coordinate conventions
    h1_to_roam = torch.tensor(
        [[0.0, -1.0, 0.0],
         [0.0, 0.0, 1.0],
         [-1.0, 0.0, 0.0]],
    ).to(device)
    rotations = torch.from_numpy(roam_motion[:, :, :3, :3]).to(device)
    gt_joint_orientations = rotations @ h1_to_roam  # broadcast (N,27,3,3) @ (3,3)

    return N_frame, gt_joint_positions, gt_joint_orientations


def align_bone_length_to_H1(N_frame, gt_joint_positions, gt_joint_orientations):
    """
    Rescale the ROAM skeleton so every mapped bone has the H1 robot's bone length.

    For each frame, every bone in the position correspondence table is stretched
    or shrunk along its own direction to match the corresponding H1 reference
    bone length, and the resulting per-joint offsets are accumulated down the
    kinematic chain so children follow their parents.

    N_frame: number of frames.
    gt_joint_positions: (N, 27, 3) ROAM joint positions (torch tensor, any device).
    gt_joint_orientations: passed through unchanged.

    return: (N_frame, aligned positions on the input tensor's device, gt_joint_orientations)
    """
    # prepare info
    joint_correspondence = get_joint_position_correspondence()
    # reference H1 link translations; only the pairwise distances (bone lengths) are used
    H1_link_position_example = np.load("./utils/20link_pose_example.npy")[:, :3, 3]  # (20, 3)

    device = gt_joint_positions.device
    gt_joint_positions = gt_joint_positions.detach().cpu()

    aligned_gt_joint_positions = torch.zeros(gt_joint_positions.shape)
    for frame_idx in range(N_frame):
        gt = gt_joint_positions[frame_idx]  # (27, 3)
        delta = torch.zeros(gt.shape)
        # get the bias for each joint: offset that rescales its bone to the H1 length
        for joint_corr in joint_correspondence:
            curr_h1, curr_human, parent_h1, parent_human = joint_corr
            if parent_human == -1:
                continue  # chain roots have no bone to rescale
            h1_bone_length = ((H1_link_position_example[curr_h1] - H1_link_position_example[parent_h1])**2).sum()**0.5
            curr_human_bone = gt[curr_human] - gt[parent_human]
            # clamp guards against division by zero for degenerate (zero-length) bones
            delta[curr_human] = h1_bone_length * (curr_human_bone / torch.clamp(curr_human_bone.norm(p=None), 1e-6, None)) - curr_human_bone
        # accumulate the bias down the chain
        # NOTE(review): this relies on parents appearing before children in the
        # correspondence list order — confirm if the table is ever reordered
        for joint_corr in joint_correspondence:
            _, curr_human, _, parent_human = joint_corr
            if parent_human == -1:
                continue
            delta[curr_human] += delta[parent_human]
        # presumably ROAM joints 12/16 are the hands attached to wrists 11/15,
        # which are not covered by the correspondence table — TODO confirm
        delta[12] += delta[11]
        delta[16] += delta[15]
        # save
        aligned_gt_joint_positions[frame_idx] = gt + delta

    aligned_gt_joint_positions = aligned_gt_joint_positions.to(dtype=gt_joint_positions.dtype).to(device)
    return N_frame, aligned_gt_joint_positions, gt_joint_orientations


def complete_h1_motion(h1_motion, chain, link_names, device="cuda:0"):
    """
    Attach per-link world poses to an H1 motion dict.

    Runs forward kinematics with the motion's joint angles and global pose, and
    stores the result under "link_global_poses" as a dict mapping link name to
    a (N_frame, 4, 4) numpy array. Returns the (mutated) h1_motion dict.
    """
    link_to_world = forward_kinematics(
        chain,
        link_names,
        h1_motion["joint_angles"],
        global_rotation=h1_motion["global_rotations"],
        global_translation=h1_motion["global_translations"],
        device=device,
    )
    h1_motion["link_global_poses"] = {
        name: pose.detach().cpu().numpy() for name, pose in link_to_world.items()
    }
    return h1_motion


def visualize(poses, scene_mesh):
    """
    Display scene_mesh together with a small (5cm) coordinate frame placed at
    every pose in `poses` (array of 4x4 world transforms), using open3d.
    """
    geometries = [scene_mesh]
    for pose in poses:
        frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=0.05)
        # transform() is in-place and returns the mesh (see the chained usage
        # with read_triangle_mesh elsewhere in this file)
        geometries.append(frame.transform(pose))
    o3d.visualization.draw_geometries(geometries)


def optimization_roam_to_h1(chain, link_names, roam_motion, scene_mesh, method_name="omnih2o", device="cuda:0"):
    """
    Retarget a ROAM motion onto the H1 robot by gradient-descent optimization.

    Optimizes joint angles and the global root pose (Adam, 2000 epochs) so that
    the H1 forward kinematics matches the ROAM joint positions/orientations.

    chain: h1 chain
    link_names: h1 link names
    roam_motion: a np.float32 representing the pose of each joint in each frame in world space, shape = (N, 27, 4, 4)
    scene_mesh: currently unused (reserved for the contact loss TODO below)
    method_name: "omnih2o" additionally rescales the ROAM skeleton to H1 bone lengths first

    return: {"joint_angles": (N, 19), "global_rotations": (N, 3), "global_translations": (N, 3),
             "link_global_poses": {link_name: (N, 4, 4) np.ndarray}}
    """

    N_frame, gt_joint_positions, gt_joint_orientations = preprocess_gt_info_roam(roam_motion, device=device)
    if method_name == "omnih2o":
        print("hand positions before:", gt_joint_positions[-1, 12], gt_joint_positions[-1, 16])
        N_frame, gt_joint_positions, gt_joint_orientations = align_bone_length_to_H1(N_frame, gt_joint_positions, gt_joint_orientations)
        print("hand positions after:", gt_joint_positions[-1, 12], gt_joint_positions[-1, 16])

    ########################## start optimization #################################
    # initialize the root pose directly from the ROAM root joint (index 0)
    init_global_translations = gt_joint_positions[:, 0].clone()  # (N, 3)
    init_global_rotations = []
    for i in range(N_frame):
        # convert each root rotation matrix to an axis-angle vector (axis * angle)
        axis, angle = mat2axangle(gt_joint_orientations[i, 0].detach().cpu().numpy(), unit_thresh=1e-3)
        init_global_rotations.append(axis * angle)
    init_global_rotations = torch.from_numpy(np.float32(init_global_rotations)).to(device)  # (N, 3)
    h1_motion_model = H1_Motion_Model(batch_size=N_frame, init_global_translations=init_global_translations, init_global_rotations=init_global_rotations, device=device)

    optimizer = torch.optim.Adam(h1_motion_model.parameters(), lr=2e-2)
    h1_motion_model.train()

    joint_position_corrs = get_joint_position_correspondence()
    joint_orientation_corrs = get_joint_orientation_correspondence()

    for epoch in range(2000):
        h1_motion = h1_motion_model()

        pred_link_to_world_dict = forward_kinematics(chain, link_names, h1_motion["joint_angles"], global_rotation=h1_motion["global_rotations"], global_translation=h1_motion["global_translations"], device=device)

        # joint global position loss; pelvis and ankle links are weighted 5x
        joint_global_position_loss = 0
        for joint_corr in joint_position_corrs:
            if ("pelvis" in link_names[joint_corr[0]]) or ("ankle" in link_names[joint_corr[0]]):
                scale = 5.0
            else:
                scale = 1.0
            joint_global_position_loss += scale * ((pred_link_to_world_dict[link_names[joint_corr[0]]][:, :3, 3] - gt_joint_positions[:, joint_corr[1]])**2).sum(dim=-1).mean()
        
        # smoothness terms via finite differences over frames
        pred_joint_angles = h1_motion["joint_angles"]
        pred_joint_velocities = pred_joint_angles[1:] - pred_joint_angles[:-1]
        pred_joint_accelerations = pred_joint_velocities[1:] - pred_joint_velocities[:-1]
        pred_root_linear_velocities = pred_link_to_world_dict["pelvis"][1:, :3, 3] - pred_link_to_world_dict["pelvis"][:-1, :3, 3]
        pred_root_linear_acceleration = pred_root_linear_velocities[1:] - pred_root_linear_velocities[:-1]
        # NOTE(review): joint_local_velocity_loss and root_global_linear_acceleration_loss
        # are computed but NOT added to the total loss below — presumably disabled
        # terms; confirm whether they should carry a weight
        joint_local_velocity_loss = pred_joint_velocities.abs().sum(dim=-1).mean()
        joint_local_acceleration_loss = pred_joint_accelerations.abs().sum(dim=-1).mean()
        root_global_linear_acceleration_loss = (pred_root_linear_acceleration**2).sum(dim=-1).mean()

        # joint global rotation loss (elementwise L1 between rotation matrices)
        joint_global_orientation_loss = 0
        for joint_corr in joint_orientation_corrs:
            pred_R = pred_link_to_world_dict[link_names[joint_corr[0]]][:, :3, :3]  # (N, 3, 3)
            gt_R = gt_joint_orientations[:, joint_corr[1]]  # (N, 3, 3)
            joint_global_orientation_loss += (pred_R - gt_R).abs().sum(dim=-1).sum(dim=-1).mean()

        # TODO: add contact loss

        loss = 1.0 * joint_global_position_loss + 1.0 * joint_local_acceleration_loss + 1.0 * joint_global_orientation_loss

        if epoch % 100 == 0:
            print(epoch, loss.item(), joint_global_position_loss.item(), joint_local_acceleration_loss.item(), joint_global_orientation_loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    
    ########################## finish optimization ################################

    # re-run the model once after optimization and attach per-link world poses
    h1_motion = h1_motion_model()
    h1_motion = complete_h1_motion(h1_motion, chain, link_names, device=device)
    return h1_motion


def copy_rotation_roam_to_h1(chain, link_names, roam_motion, device="cuda:0"):
    """
    Analytically retarget a ROAM motion onto the H1 robot by copying relative
    joint rotations (no optimization).

    chain: h1 chain
    link_names: h1 link names
    roam_motion: a np.float32 representing the pose of each joint in each frame in world space, shape = (N, 27, 4, 4)

    return: {"joint_angles": (N, 19), "global_rotations": (N, 3), "global_translations": (N, 3),
             "link_global_poses": {link_name: (N, 4, 4) np.ndarray}}
    """

    N_frame, gt_joint_positions, gt_joint_orientations = preprocess_gt_info_roam(roam_motion, device=device)

    ########################## start copy rotation #################################
    # ROAM joint whose orientation serves as the H1 root orientation
    root_rotation_idx = 2
    
    # root translation comes from ROAM joint 0
    global_translations = gt_joint_positions[:, 0].clone()  # (N, 3)
    # root orientation as per-frame axis-angle vectors (axis * angle)
    global_rotations = []
    for i in range(N_frame):
        axis, angle = mat2axangle(gt_joint_orientations[i, root_rotation_idx].detach().cpu().numpy(), unit_thresh=1e-3)
        global_rotations.append(axis * angle)
    global_rotations = torch.from_numpy(np.float32(global_rotations)).to(device)  # (N, 3)

    joint_angles = torch.zeros(N_frame, 19).to(dtype=torch.float32).to(device)
    # thighs: rotation of each thigh (ROAM 17 left, 22 right) relative to the root;
    # R_parent^T @ R_child gives the child-in-parent rotation throughout this function
    R_left_thigh_to_pelvis = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, root_rotation_idx].permute(0, 2, 1), gt_joint_orientations[:, 17])
    R_right_thigh_to_pelvis = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, root_rotation_idx].permute(0, 2, 1), gt_joint_orientations[:, 22])
    # 'yxz' Euler angles reindexed [2,1,0] -> stored as (z, x, y) in joint slots 0:3 / 5:8
    euler = torch.from_numpy(sRot.from_matrix(R_left_thigh_to_pelvis.detach().cpu().numpy()).as_euler('yxz'))[:, [2,1,0]]
    joint_angles[:, 0:3] = euler
    euler = torch.from_numpy(sRot.from_matrix(R_right_thigh_to_pelvis.detach().cpu().numpy()).as_euler('yxz'))[:, [2,1,0]]
    joint_angles[:, 5:8] = euler
    # knees: only the first ('y') Euler angle is kept — single-DoF joints
    R_left_knee_to_thigh = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 17].permute(0, 2, 1), gt_joint_orientations[:, 18])
    R_right_knee_to_thigh = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 22].permute(0, 2, 1), gt_joint_orientations[:, 23])
    euler = torch.from_numpy(sRot.from_matrix(R_left_knee_to_thigh.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 3] = euler
    euler = torch.from_numpy(sRot.from_matrix(R_right_knee_to_thigh.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 8] = euler
    # ankles: likewise single-DoF, first ('y') Euler angle only
    R_left_ankle_to_knee = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 18].permute(0, 2, 1), gt_joint_orientations[:, 19])
    R_right_ankle_to_knee = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 23].permute(0, 2, 1), gt_joint_orientations[:, 24])
    euler = torch.from_numpy(sRot.from_matrix(R_left_ankle_to_knee.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 4] = euler
    euler = torch.from_numpy(sRot.from_matrix(R_right_ankle_to_knee.detach().cpu().numpy()).as_euler('yzx'))[:, 0]
    joint_angles[:, 9] = euler
    # torso (ROAM 4): yaw ('z') component relative to the root -> joint slot 10
    R_torso_to_pelvis = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, root_rotation_idx].permute(0, 2, 1), gt_joint_orientations[:, 4])
    euler = torch.from_numpy(sRot.from_matrix(R_torso_to_pelvis.detach().cpu().numpy()).as_euler('zxy'))[:, 0]
    joint_angles[:, 10] = euler
    # shoulders and elbows: the shoulder pitch axis is tilted 25 degrees about x
    # (mirrored left/right) — presumably matching the tilted shoulder pitch joint
    # axis in the H1 URDF; TODO confirm against the URDF
    theta = np.pi / 180 * 25
    left_shoulder_pitch_rot = sRot.from_euler('x', theta)
    left_shoulder_pitch_axis = left_shoulder_pitch_rot.apply([0,1,0])
    right_shoulder_pitch_rot = sRot.from_euler('x', -theta)
    right_shoulder_pitch_axis = right_shoulder_pitch_rot.apply([0,1,0])
    # decompose shoulder rotation (ROAM 10 left / 14 right, relative to torso 4)
    # into twist about the tilted pitch axis, then x (roll), then z (yaw);
    # the +/- pi/2 offsets presumably account for the H1 zero-pose arm
    # orientation — TODO confirm
    R_left_shoulder_to_torso = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 4].permute(0, 2, 1), gt_joint_orientations[:, 10]).detach().cpu().numpy()
    Ryp_theta, Rxz = rot_decompose(R_left_shoulder_to_torso, left_shoulder_pitch_axis)
    Rx_theta, Rz = rot_decompose(Rxz, np.float64([1,0,0]))
    Rz_theta, _ = rot_decompose(Rz, np.float64([0,0,1]))
    euler = torch.stack([Ryp_theta, Rx_theta + np.pi / 2, Rz_theta], dim=-1)
    joint_angles[:, 11:14] = euler
    R_right_shoulder_to_torso = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 4].permute(0, 2, 1), gt_joint_orientations[:, 14]).detach().cpu().numpy()
    Ryp_theta, Rxz = rot_decompose(R_right_shoulder_to_torso, right_shoulder_pitch_axis)
    Rx_theta, Rz = rot_decompose(Rxz, np.float64([1,0,0]))
    Rz_theta, _ = rot_decompose(Rz, np.float64([0,0,1]))
    euler = torch.stack([Ryp_theta, Rx_theta - np.pi / 2, Rz_theta], dim=-1)
    joint_angles[:, 15:18] = euler
    # elbows (ROAM 11 left / 15 right, relative to the shoulder): twist about
    # +/-z is the elbow angle; the residual twist about x is folded back into
    # the shoulder yaw slot as compensation ("conpen" = compensation)
    R_left_elbow_to_shoulder = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 10].permute(0, 2, 1), gt_joint_orientations[:, 11]).detach().cpu().numpy()
    left_elbow_angle, left_elbow_R2 = rot_decompose(R_left_elbow_to_shoulder, np.float64([0,0,1]))
    left_shoulder_conpen, _ = rot_decompose(left_elbow_R2, np.float64([1,0,0]))
    joint_angles[:, 13] += left_shoulder_conpen.to(device)
    joint_angles[:, 14] = left_elbow_angle + np.pi/2
    R_right_elbow_to_shoulder = torch.einsum('bij,bjk->bik', gt_joint_orientations[:, 14].permute(0, 2, 1), gt_joint_orientations[:, 15]).detach().cpu().numpy()
    right_elbow_angle, right_elbow_R2 = rot_decompose(R_right_elbow_to_shoulder, np.float64([0,0,-1]))
    right_shoulder_conpen, _ = rot_decompose(right_elbow_R2, np.float64([1,0,0]))
    joint_angles[:, 17] += right_shoulder_conpen.to(device)
    joint_angles[:, 18] = right_elbow_angle + np.pi/2
    ########################## finish copy rotation ################################

    h1_motion = {
        "joint_angles": joint_angles,
        "global_rotations": global_rotations,
        "global_translations": global_translations,
    }
    h1_motion = complete_h1_motion(h1_motion, chain, link_names, device=device)
    return h1_motion


def retarget_roam_to_h1(chain, link_names, roam_motion, scene_mesh, retargeting_method="optimization", device="cuda:0"):
    """
    Dispatch ROAM-to-H1 retargeting to the selected backend.

    chain: h1 chain
    link_names: h1 link names
    roam_motion: np.float32 world-space joint poses, shape (N, 27, 4, 4)
    scene_mesh: scene geometry forwarded to the optimization backends
        (unused by "copy_rotation")
    retargeting_method: "optimization", "omnih2o" (optimization with bone-length
        alignment), or "copy_rotation" (analytic)

    return: the h1_motion dict produced by the selected backend

    Raises NotImplementedError for an unrecognized retargeting_method.
    """
    if retargeting_method == "optimization":
        return optimization_roam_to_h1(chain, link_names, roam_motion, scene_mesh, device=device)
    elif retargeting_method == "omnih2o":
        return optimization_roam_to_h1(chain, link_names, roam_motion, scene_mesh, method_name="omnih2o", device=device)
    elif retargeting_method == "copy_rotation":
        return copy_rotation_roam_to_h1(chain, link_names, roam_motion, device=device)
    else:
        # include the offending value so misconfiguration is easy to diagnose
        raise NotImplementedError(f"unknown retargeting_method: {retargeting_method!r}")


if __name__ == "__main__":

    ################################ change here ###############################################
    # NOTE: the motion produced by this step shares the coordinate frame of the kinematic data;
    # after it finishes, run align_retargeted_data.py to align the results by object pose
    roam_data_root = "/home/liuyun/Humanoid_IL_Benchmark/Kinematic_Data/ROAM"
    roam_data_type = "sofa_lie"  # "chair_sit_multipose"
    save_dir = "./ROAM_retargeted_data_" + roam_data_type + "_copyrot_notalign"
    h1_urdf_path = "/home/liuyun/Humanoid_IL_Benchmark/retargeting/assets/h1_description/urdf/h1.urdf"
    retargeting_method = "copy_rotation"  # optimization / copy_rotation / omnih2o

    start_frame, end_frame = [0, -1]  # retarget the [start_frame, end_frame] slice of each raw sequence; negative values count from the end
    sampling_rate = 1  # keep one frame out of every `sampling_rate` frames

    device = "cuda:0"
    N_split = 1  # total number of batches when running multiple processes in parallel
    split = 0  # index of the batch handled by this process
    ############################################################################################

    chain = load_urdf(h1_urdf_path, device=device)
    link_names = get_h1_link_names()

    # collect object names (prefix before the first "_") that have HumanPoses folders
    scene_obj_name_dict = {}
    for fn in os.listdir(join(roam_data_root, roam_data_type, "HumanPoses")):
        if not isdir(join(roam_data_root, roam_data_type, "HumanPoses", fn)):
            continue
        scene_obj_name_dict[fn.split("_")[0]] = 1
    scene_obj_names = []
    for scene_obj_name in scene_obj_name_dict.keys():
        # NOTE(review): this skips objects already present under "./ROAM_retargeted_data",
        # which differs from save_dir above — confirm this resume-path check is intended
        if isdir(join("./ROAM_retargeted_data", scene_obj_name)):
            continue
        scene_obj_names.append(scene_obj_name)
    
    # process batch number `split` out of `N_split`
    scene_obj_names.sort()
    assert len(scene_obj_names) > 0
    N = ((len(scene_obj_names) - 1) // N_split) + 1
    scene_obj_names = scene_obj_names[N*split : min(N*(split+1), len(scene_obj_names))]

    for scene_obj_name in scene_obj_names:
        # gather all sequence folders belonging to this object
        scene_names = []
        for scene_name in os.listdir(join(roam_data_root, roam_data_type, "HumanPoses")):
            if scene_name.startswith(scene_obj_name) and isdir(join(roam_data_root, roam_data_type, "HumanPoses", scene_name)):
                scene_names.append(scene_name)
        scene_names.sort()
        roam_data_metadata = {
            "human_skeleton_path": join(roam_data_root, "BoneInfo.json"),
            "sequences": [],
        }
        for scene_name in scene_names:
            meta = {
                "obj_mesh_path": join(roam_data_root, roam_data_type, "ObjectDemoCollections", scene_name + "_mesh_world.obj"),
                "obj_pose_path": join(roam_data_root, roam_data_type, "HumanPoses", scene_name, "ObjectPose.npy"),
                "human_positions_path": join(roam_data_root, roam_data_type, "HumanPoses", scene_name, "Positions.json"),
                "human_rotations_path": join(roam_data_root, roam_data_type, "HumanPoses", scene_name, "Rotations.json"),
            }
            roam_data_metadata["sequences"].append(meta)

        N_seq = len(roam_data_metadata["sequences"])
        print("############ sequence number = {} ############".format(N_seq))
        print("############ start loading data ... ############")
        roam_sequence_info = load_roam_data_overall(roam_data_metadata, start_frame=start_frame, end_frame=end_frame, sampling_rate=sampling_rate)
        print("############ finish loading data !!! ############")

        os.makedirs(join(save_dir, scene_obj_name, "h1_kinematic_motions"), exist_ok=True)
        # save scene mesh
        # NOTE: all sequences are required to share exactly the same scene mesh
        transform_matrix = np.load(roam_data_metadata["sequences"][0]["obj_pose_path"])
        # eliminate the world space difference between ROAM and H1 environment
        T = np.eye(4).astype(np.float32)
        T[:3, :3] = np.float32([[1,0,0],[0,0,-1],[0,1,0]])
        transform_matrix = T @ transform_matrix
        scene_mesh = o3d.io.read_triangle_mesh(roam_data_metadata["sequences"][0]["obj_mesh_path"]).transform(transform_matrix)
        o3d.io.write_triangle_mesh(join(save_dir, scene_obj_name, "scene_mesh.obj"), scene_mesh)

        for i, (scene_name, seq) in enumerate(zip(scene_names, roam_sequence_info)):
            print("############ start retargeting sequence {} / {} ... ############".format(i, N_seq))

            h1_motion = retarget_roam_to_h1(chain, link_names, seq, scene_mesh, retargeting_method=retargeting_method, device=device)
            save_predicted_h1_motion(h1_motion, join(save_dir, scene_obj_name, "h1_kinematic_motions", "{}.npz".format(scene_name)))