import os
import numpy as np
import torch
from SAMP_utils.load_data import load_smplx_motion
import trimesh
from transforms3d.euler import euler2mat
from mesh import merge_mesh, save_mesh
from h1_kinematics import H1_Motion_Model, load_urdf, get_h1_link_names, forward_kinematics, save_predicted_h1_motion
import open3d as o3d


def get_joint_position_correspondence():
    """
    Return the joint-position pairing used by the retargeting loss.

    Each entry is ``[h1_joint_index, smplx_joint_index]``: the H1 link at
    index ``h1_joint_index`` (into the H1 link-name list) is pulled toward
    the SMPLX joint at ``smplx_joint_index``.
    """
    # (h1, smplx) pairs; entries with no useful SMPLX counterpart are omitted.
    pairs = (
        (0, 0), (1, 1), (2, 1),
        (4, 4), (5, 7),
        (6, 2), (7, 2),
        (9, 5), (10, 8),
        (11, 0),
        (12, 16), (14, 18), (15, 20),
        (16, 17), (18, 19), (19, 21),
    )
    return [list(pair) for pair in pairs]


def get_joint_orientation_correspondence():
    """
    Return the joint-orientation pairing used by the retargeting loss.

    Each entry is ``[h1_joint_index, smplx_joint_index]``: the global
    rotation of the H1 link at ``h1_joint_index`` is matched against the
    global rotation of the SMPLX joint at ``smplx_joint_index``. Only the
    root and the leg/foot links are constrained here; arm orientations are
    intentionally left free.
    """
    # (h1, smplx) pairs; a subset of the position correspondence.
    pairs = ((0, 0), (4, 4), (5, 7), (9, 5), (10, 8), (11, 0))
    return [list(pair) for pair in pairs]


def retarget_smplx_to_h1(chain, link_names, smplx_motion, device="cuda:0", n_epochs=3000, lr=2e-2):
    """
    Retarget an SMPLX motion onto the H1 humanoid by gradient descent.

    Optimizes the parameters of an ``H1_Motion_Model`` (per-frame joint
    angles plus a global root transform) so that the H1 links' forward
    kinematics match the SMPLX joints' global positions and orientations.

    Args:
        chain: H1 kinematic chain (as returned by ``load_urdf``).
        link_names: list of H1 link names; indexed by the h1 side of the
            correspondence tables above.
        smplx_motion: dict with numpy arrays
            ``"joint_positions"``: (N, 127, 3) and
            ``"joint_global_rotations"``: (N, 22, 3, 3).
        device: torch device string for the optimization.
        n_epochs: number of Adam steps (default matches the original 3000).
        lr: Adam learning rate (default matches the original 2e-2).

    Returns:
        The dict produced by ``H1_Motion_Model`` (per the original contract:
        ``"joint_angles"``, ``"global_rotations"``, ``"global_translations"``)
        with an added ``"link_global_poses"`` entry mapping each link name to
        a numpy (N, 4, 4) array of link-to-world transforms.
    """
    # .clone() is deliberate: torch.from_numpy shares memory with the numpy
    # array, and .to(device) is a no-op copy on CPU — without the clone the
    # in-place centering below would mutate the caller's smplx_motion.
    gt_joint_positions = torch.from_numpy(smplx_motion["joint_positions"]).to(device).clone()

    # Center the trajectory so the root of frame 0 sits at the XY origin.
    # The offsets must be cloned BEFORE the in-place subtraction: they are
    # views into the tensor being modified, and `-=` would otherwise change
    # them mid-operation, leaving the centering incomplete.
    cx, cy = gt_joint_positions[0, 0, 0].clone(), gt_joint_positions[0, 0, 1].clone()
    gt_joint_positions[:, :, 0] -= cx
    gt_joint_positions[:, :, 1] -= cy

    N_frame = gt_joint_positions.shape[0]

    # Axis permutation mapping the H1 link frame convention into the SMPLX
    # joint frame convention (applied on the right of each global rotation).
    h1_to_smplx = torch.tensor([[0, 1, 0], [0, 0, 1], [1, 0, 0]]).to(dtype=torch.float32).to(device)
    gt_joint_orientations = torch.einsum(
        'bcij,bcjk->bcik',
        torch.from_numpy(smplx_motion["joint_global_rotations"]).to(device),
        h1_to_smplx.reshape(1, 1, 3, 3),
    )

    ########################## start optimization #################################
    h1_motion_model = H1_Motion_Model(batch_size=N_frame, device=device)

    optimizer = torch.optim.Adam(h1_motion_model.parameters(), lr=lr)
    h1_motion_model.train()

    joint_position_corrs = get_joint_position_correspondence()
    joint_orientation_corrs = get_joint_orientation_correspondence()

    for epoch in range(n_epochs):
        h1_motion = h1_motion_model()

        pred_link_to_world_dict = forward_kinematics(
            chain, link_names, h1_motion["joint_angles"],
            global_rotation=h1_motion["global_rotations"],
            global_translation=h1_motion["global_translations"],
            device=device,
        )

        # Joint global position loss. Pelvis and ankles are weighted higher
        # so the root trajectory and foot placement dominate.
        joint_global_position_loss = 0
        for joint_corr in joint_position_corrs:
            if ("pelvis" in link_names[joint_corr[0]]) or ("ankle" in link_names[joint_corr[0]]):
                scale = 5.0
            else:
                scale = 1.0
            joint_global_position_loss += scale * (
                (pred_link_to_world_dict[link_names[joint_corr[0]]][:, :3, 3]
                 - gt_joint_positions[:, joint_corr[1]]) ** 2
            ).sum(dim=-1).mean()

        # Joint-angle acceleration smoothness (finite differences over frames).
        # NOTE(review): the original also computed joint velocity and root
        # linear acceleration losses each epoch but never added them to the
        # total loss; that dead per-epoch work has been removed.
        pred_joint_angles = h1_motion["joint_angles"]
        pred_joint_velocities = pred_joint_angles[1:] - pred_joint_angles[:-1]
        pred_joint_accelerations = pred_joint_velocities[1:] - pred_joint_velocities[:-1]
        joint_local_acceleration_loss = pred_joint_accelerations.abs().sum(dim=-1).mean()

        # Joint global rotation loss (elementwise L1 between rotation matrices).
        joint_global_orientation_loss = 0
        for joint_corr in joint_orientation_corrs:
            pred_R = pred_link_to_world_dict[link_names[joint_corr[0]]][:, :3, :3]  # (N, 3, 3)
            gt_R = gt_joint_orientations[:, joint_corr[1]]  # (N, 3, 3)
            joint_global_orientation_loss += (pred_R - gt_R).abs().sum(dim=-1).sum(dim=-1).mean()

        # TODO: add contact loss

        loss = 1.0 * joint_global_position_loss + 1.0 * joint_local_acceleration_loss + 1.0 * joint_global_orientation_loss

        if epoch % 100 == 0:
            print(epoch, loss.item(), joint_global_position_loss.item(), joint_local_acceleration_loss.item(), joint_global_orientation_loss.item())

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    ########################## finish optimization ################################

    # Final forward pass; export every link's per-frame world pose as numpy.
    h1_motion = h1_motion_model()
    pred_link_to_world_dict = forward_kinematics(
        chain, link_names, h1_motion["joint_angles"],
        global_rotation=h1_motion["global_rotations"],
        global_translation=h1_motion["global_translations"],
        device=device,
    )
    pred_link_to_world_dict_np = {}
    for link_name in pred_link_to_world_dict:
        pred_link_to_world_dict_np[link_name] = pred_link_to_world_dict[link_name].detach().cpu().numpy()  # (N_frame, 4, 4)
    h1_motion["link_global_poses"] = pred_link_to_world_dict_np
    return h1_motion


if __name__ == "__main__":

    device = "cuda:0"

    # Build the H1 kinematic chain and its link-name list from the URDF.
    chain = load_urdf("/home/liuyun/Humanoid_IL_Benchmark/retargeting/assets/h1_description/urdf/h1.urdf", device=device)
    link_names = get_h1_link_names()

    # Source SMPLX clip from the SAMP dataset; swap the path to retarget a
    # different sequence:
    # "/media/liuyun/TOSHIBA EXT/SAMP/pkl/armchair007_stageII.pkl"
    # "/media/liuyun/TOSHIBA EXT/SAMP/pkl/sofa012_stageII.pkl"
    # "/media/liuyun/TOSHIBA EXT/SAMP/pkl/run_circular_stageII.pkl"
    # "/media/liuyun/TOSHIBA EXT/SAMP/pkl/chair_mo002_stageII.pkl"
    smplx_file_path = "/media/liuyun/TOSHIBA EXT/SAMP/pkl/chair_mo001_stageII.pkl"
    smplx_model_path = "./SAMP_utils/models"

    # Load a sub-sampled window of the clip (frames 1000-1600, every 12th).
    smplx_motion = load_smplx_motion(smplx_file_path, smplx_model_path, start_frame=1000, end_frame=1600, sampling_rate=12)

    # (A commented-out open3d/trimesh debug block that dumped per-joint
    # coordinate frames to ./ex.obj used to live here.)

    # Optimize the H1 motion against the SMPLX targets and save the result.
    h1_motion = retarget_smplx_to_h1(chain, link_names, smplx_motion, device=device)
    save_predicted_h1_motion(h1_motion, "./pred.npz")