import os
from os.path import join
import numpy as np
import trimesh
import torch
import smplx
from smplx.lbs import batch_rodrigues
from transforms3d.euler import euler2mat, mat2euler


def load_HOI_data(sequence_dir: str, person_name: str, smplx_model_dir: str, start_frame: int = 0, end_frame: int = -1, sampling_rate: int = 1, special_process_for_box: bool = False):
    """Load one CORE4D human-object interaction sequence in a z-up world frame.

    Reads the object mesh, per-frame object poses, and one person's SMPL-X
    parameters from ``sequence_dir``; converts everything from CORE4D's y-up
    convention to z-up; and recovers per-joint global rotations by composing
    local rotations along the SMPL-X kinematic chain (first 22 joints).

    Args:
        sequence_dir: directory holding "object_mesh.obj", "object_poses.npy"
            and "<person_name>_data.npz".
        person_name: file-name prefix of the person's npz data.
        smplx_model_dir: ``model_path`` forwarded to ``smplx.create``.
        start_frame: first frame (negative counts from the end).
        end_frame: exclusive last frame (negative counts from the end; note
            that the default -1 therefore drops the final frame).
        sampling_rate: temporal stride over the selected frame range.
        special_process_for_box: if True, flip the object pose when its y-axis
            points into the ground at frame 0 (box sequences only).

    Returns:
        (human_motion, obj_mesh, obj_motion):
        ``human_motion`` is a dict with "joint_positions" (taken directly from
        the npz and rotated to z-up; presumably (M, J, 3) — TODO confirm) and
        "joint_global_rotations" (M, 22, 3, 3); ``obj_mesh`` is the raw
        trimesh; ``obj_motion`` is (M, 4, 4) object poses. M is the number of
        sampled frames.
    """

    # CORE4D uses the y-axis as the ground normal; convert so the z-axis is the ground normal.
    delta_R = euler2mat(np.pi / 2, 0, 0).astype(np.float32)
    delta_T = np.eye(4).astype(np.float32)
    delta_T[:3, :3] = delta_R

    obj_mesh = trimesh.load(join(sequence_dir, "object_mesh.obj"))
    obj_motion = np.load(join(sequence_dir, "object_poses.npy")).astype(np.float32)
    if special_process_for_box:
        pre_T = np.eye(4).astype(np.float32)
        if obj_motion[0, 1, 1] < 0.0:  # (assumes the object lies flat on the ground at frame 0) the object's y-axis points straight down
            pre_T[:3, :3] = euler2mat(np.pi, 0, 0)
        obj_motion = obj_motion @ pre_T  # flip so the object's y-axis points up, away from the ground

    # Left-multiply to re-express all object poses in the z-up world frame.
    obj_motion = delta_T @ obj_motion
    N_frame = obj_motion.shape[0]
    if start_frame < 0:
        start_frame += N_frame
    if end_frame < 0:
        end_frame += N_frame
    assert start_frame < end_frame
    obj_motion = obj_motion[start_frame:end_frame:sampling_rate]

    human_rawdata = np.load(join(sequence_dir, person_name + "_data.npz"), allow_pickle=True)["arr_0"].item()

    # Run SMPL-X over the FULL sequence; the frame-range slicing happens below.
    body_model = smplx.create(model_path=smplx_model_dir, model_type='smplx', gender="male", use_pca=True, batch_size=N_frame)
    global_orient = torch.from_numpy(human_rawdata["global_orient"]).to(dtype=torch.float32)  # (N_frame, 3)
    body_pose = torch.from_numpy(human_rawdata["body_pose"]).to(dtype=torch.float32).reshape(N_frame, 63)  # (N_frame, 63)
    transl = torch.from_numpy(human_rawdata["transl"]).to(dtype=torch.float32)  # (N_frame, 3)
    output = body_model(global_orient=global_orient, body_pose=body_pose, betas=torch.from_numpy(human_rawdata["betas"]).to(dtype=torch.float32), transl=transl, return_verts=True, return_full_pose=True)

    # Axis-angle local rotations for the 22 body joints -> rotation matrices.
    local_body_rotations = output["full_pose"].reshape(-1, 55, 3)[:, :22, :]
    local_body_rotations = batch_rodrigues(local_body_rotations.reshape(-1, 3)).reshape(-1, 22, 3, 3)
    # Accumulate global rotations down the kinematic tree: joint i's global
    # rotation is parent_global @ local_i. The list is indexed by joint id,
    # which works because parents[i] < i for every SMPL-X joint.
    global_body_rotations = [local_body_rotations[:, :1]]
    parents = body_model.parents[:22]
    for i in range(1, 22):
        # Subtract the joint location at the rest pose
        # No need for rotation, since it's identity when at rest
        global_joint_rotation = torch.matmul(global_body_rotations[parents[i]][:, 0], local_body_rotations[:, i]).unsqueeze(1)
        global_body_rotations.append(global_joint_rotation)
    global_body_rotations = torch.cat(global_body_rotations, dim=1).detach().cpu().numpy()  # (N_frame, 22, 3, 3)

    # Rotate precomputed joint positions and accumulated rotations into the
    # z-up world frame, then apply the same frame slicing as obj_motion.
    human_motion = {
        "joint_positions": human_rawdata["joints"][start_frame:end_frame:sampling_rate] @ delta_R.T,
        "joint_global_rotations": delta_R @ global_body_rotations[start_frame:end_frame:sampling_rate],
    }

    return human_motion, obj_mesh, obj_motion


def load_HOI_data_aligned_by_objposition(sequence_dir, person_name, smplx_model_dir, start_frame=0, end_frame=-1, sampling_rate=1):
    """
    for touch point

    Loads a sequence and recenters it so the object's first-frame position
    sits at the world xy-origin (height is preserved). Returns the human
    motion together with the object mesh posed at that recentered first frame.
    """

    human_motion, obj_mesh, obj_motion = load_HOI_data(sequence_dir, person_name, smplx_model_dir, start_frame, end_frame, sampling_rate, special_process_for_box=False)

    # Remove the first-frame xy offset from both the object pose and the joints.
    first_pose = obj_motion[0]
    offset_xy = first_pose[:2, 3].copy()
    first_pose[:2, 3] -= offset_xy
    human_motion["joint_positions"][:, :, :2] -= offset_xy

    # Bake the recentered first-frame pose into the mesh vertices.
    rot = first_pose[:3, :3]
    trans = first_pose[:3, 3].reshape(1, 3)
    posed_vertices = obj_mesh.vertices @ rot.T + trans
    obj_posed_mesh = trimesh.Trimesh(vertices=posed_vertices, faces=obj_mesh.faces)

    return human_motion, obj_posed_mesh


def load_HOI_data_aligned_by_objpose(sequence_dir, person_name, smplx_model_dir, start_frame=0, end_frame=-1, sampling_rate=1):
    """
    for carry box

    Loads a box sequence and rigidly aligns the whole motion (object poses,
    joint positions, joint rotations) so that at frame 0 the object sits
    upright at the world xy-origin with zero yaw. Assumes the object
    initially lies flat on the ground with its y-axis along world +z.
    """

    human_motion, obj_mesh, obj_motion = load_HOI_data(sequence_dir, person_name, smplx_model_dir, start_frame, end_frame, sampling_rate, special_process_for_box=True)

    # Translation that moves the first-frame object to (0, 0); height untouched.
    shift = -obj_motion[0, :3, 3].copy()
    shift[2] = 0.0

    # Measure the first-frame yaw after undoing the y-up -> z-up conversion,
    # then build the rotation that cancels it.
    undo_zup = euler2mat(-np.pi / 2, 0, 0).astype(np.float32)
    yaw = mat2euler(undo_zup @ obj_motion[0, :3, :3].copy())[1]
    align_R = euler2mat(0, 0, -yaw).astype(np.float32)

    # Rigid alignment: p' = align_R @ (p + shift).
    align_T = np.eye(4).astype(np.float32)
    align_T[:3, :3] = align_R
    align_T[:3, 3] = align_R @ shift
    obj_motion = align_T @ obj_motion
    human_motion["joint_positions"] = (human_motion["joint_positions"] + shift) @ align_R.T
    human_motion["joint_global_rotations"] = align_R @ human_motion["joint_global_rotations"]

    return human_motion, obj_mesh, obj_motion
