import pickle

import numpy as np
import open3d as o3d
import trimesh
from scipy.spatial.transform import Rotation
from transforms3d.quaternions import quat2mat


def get_amp_humanoid_link_names():
    """Return the 15 AMP humanoid rigid-body link names in their canonical order.

    Order: pelvis, torso, head, then right/left arm chains
    (upper_arm, lower_arm, hand), then right/left leg chains
    (thigh, shin, foot).
    """
    names = ["pelvis", "torso", "head"]
    for side in ("right", "left"):
        names += [f"{side}_{part}" for part in ("upper_arm", "lower_arm", "hand")]
    for side in ("right", "left"):
        names += [f"{side}_{part}" for part in ("thigh", "shin", "foot")]
    assert len(names) == 15
    return names


def load_unihsi_date_one_sequence(seq_path, object_meta_info, partnet_to_shapenet_dict, scene_name, scene_mesh_dict, motion_type="sit", use_whole_motion=False, start_frame=0, end_frame=-1, sampling_rate=1):
    """
    Load one UniHSI motion sequence and (lazily) the object mesh it interacts with.

    Args:
        seq_path: path to a pickled dict; per motion type it holds a
            "rigid_body_states" torch tensor of shape (N_frame, 15, 13)
            (xyz position in [:3], xyzw quaternion in [3:7], rest unused here).
        object_meta_info: dict with "rotate", "scale" and "transfer" entries
            describing how to place the object mesh in the scene.
        partnet_to_shapenet_dict: maps a PartNet id to a mesh file path.
        scene_name: cache key for the object mesh.
        scene_mesh_dict: cache of loaded scene meshes; updated in place.
        motion_type: one of "walk", "sit", "lie".
        use_whole_motion: if True, prepend the earlier stages when present
            ("walk" before "sit"; "walk" and "sit" before "lie").
        start_frame, end_frame: inclusive frame range; negative values count
            from the end of the sequence (default end_frame=-1 == last frame).
        sampling_rate: keep every `sampling_rate`-th frame.

    Returns:
        np.float32 array of shape (N_kept_frames, 15, 4, 4) holding the
        world-space pose of each link per frame, or None if the sequence is
        marked unsuccessful.
    """
    # NOTE(review): pickle on untrusted input is unsafe; assumed trusted here.
    with open(seq_path, "rb") as f:
        data = pickle.load(f)
    if not data["fulfill"]:  # the recorded motion did not succeed
        return None

    object_partnet_id = data["pid"]
    assert motion_type in ["walk", "sit", "lie"], "[error in loading {}] no motion_type: {}".format(seq_path, motion_type)
    sequential_motion_types = [motion_type]
    if use_whole_motion:
        if motion_type == "sit":
            sequential_motion_types = ["walk", "sit"]
        elif motion_type == "lie":
            sequential_motion_types = ["walk", "sit", "lie"]

    # Concatenate the rigid-body states of all requested stages along time.
    # Earlier stages may be absent from the file; the final one must exist.
    assert sequential_motion_types[-1] in data
    link_states = np.concatenate(
        [
            data[t]["rigid_body_states"].detach().cpu().numpy()
            for t in sequential_motion_types
            if t in data
        ],
        axis=0,
    )
    N_frame = link_states.shape[0]
    assert link_states.shape == (N_frame, 15, 13)

    # Negative frame indices count from the end (Python-slice style).
    if start_frame < 0:
        start_frame = max(0, N_frame + start_frame)
    if end_frame < 0:
        end_frame = max(0, N_frame + end_frame)
    assert start_frame <= end_frame

    # motion: build a (15, 4, 4) homogeneous pose per kept frame.
    seq = []
    for frame_idx in range(start_frame, end_frame + 1, sampling_rate):
        link_poses = link_states[frame_idx, :, :7]
        final_pose = np.eye(4).reshape(1, 4, 4).repeat(15, axis=0)
        final_pose[:, :3, 3] = link_poses[:, :3]
        # Stored quaternions are scalar-last (x, y, z, w), which is exactly
        # scipy's convention; convert all 15 links at once.
        final_pose[:, :3, :3] = Rotation.from_quat(link_poses[:, 3:7]).as_matrix()
        seq.append(final_pose)
    seq = np.float32(seq)

    # object mesh: load and place it once per scene, then cache it.
    if scene_name not in scene_mesh_dict:
        obj_mesh = o3d.io.read_triangle_mesh(partnet_to_shapenet_dict[object_partnet_id])
        for r in object_meta_info["rotate"]:
            R = obj_mesh.get_rotation_matrix_from_xyz(r)
            obj_mesh.rotate(R, center=(0, 0, 0))

        # rescale about the mesh center (supports per-axis scale factors)
        scale_factors = object_meta_info["scale"]
        if isinstance(scale_factors, (int, float)):
            print("[warning] the scale is a scalar, not a list !!!")
            scale_factors = [scale_factors, scale_factors, scale_factors]
        center = obj_mesh.get_center()
        T_to_origin = np.eye(4)
        T_to_origin[:3, 3] = -center
        T_scale = np.eye(4)
        T_scale[:3, :3] = np.diag(scale_factors)
        # BUG FIX: the inverse of a pure translation negates only the
        # translation column. The previous `-T_to_origin` negated the whole
        # 4x4 (rotation block and homogeneous 1 included), which mirrors and
        # misplaces the mesh instead of translating it back.
        T_back = np.eye(4)
        T_back[:3, 3] = center
        obj_mesh.transform(T_back @ T_scale @ T_to_origin)

        # Drop the mesh onto the ground plane (min z -> 0), then apply the
        # scene-specific offset.
        obj_mesh_v = np.float32(obj_mesh.vertices)
        obj_mesh.translate((0, 0, -obj_mesh_v[:, 2].min()))
        obj_mesh.translate(object_meta_info["transfer"])
        scene_mesh_dict[scene_name] = obj_mesh

    return seq
