import os
import numpy as np
import pickle
import torch
from diff_quat import quat_from_matrix, quat_to_rotvec, quat_to_matrix, vec6d_to_matrix


def process(source_data_file, save_path, fps=10, device="cuda:0"):
    """Convert a retargeted motion .npz file into a pickled training dict.

    Reads joint angles, root pose, and per-link global poses from the npz,
    derives finite-difference velocities at ``fps``, keeps the first 20 links,
    and writes a pickle with one "motion_0" entry containing flattened global
    link features and joint pos/vel.

    Parameters
    ----------
    source_data_file : str
        Path to an .npz whose ``arr_0`` item is a dict with keys
        ``joint_angles`` (N, 19), ``global_rotations`` (N, 3),
        ``global_translations`` (N, 3), and ``link_global_poses``
        (mapping link name -> (N, 4, 4) homogeneous transforms).
        NOTE(review): shapes inferred from the asserts/reshapes below —
        confirm against the producer of this file.
    save_path : str
        Destination path for the pickled output dict.
    fps : int
        Frame rate used to scale frame-to-frame differences into velocities.
    device : str
        Torch device for the intermediate target tensors.

    Returns
    -------
    dict
        ``target_all_info`` with batched (leading dim 1) torch tensors:
        joint pos/vel, root rotation/translation, and per-link global
        pose/pos/ori/vel/ang_vel.
    """
    data = np.load(source_data_file, allow_pickle=True)["arr_0"].item()
    one_target_jt_pos = data["joint_angles"].astype(np.float32)
    one_target_root_rotation = data["global_rotations"].astype(np.float32)
    one_target_root_translation = data["global_translations"].astype(np.float32)

    link_pose_list = []
    link_pos_list = []
    link_ori_list = []
    link_vel_list = []
    link_ang_vel_list = []
    for link_name, T_raw in data["link_global_poses"].items():
        print(link_name)
        T = T_raw.astype(np.float32)  # (N_frame, 4, 4)
        link_pose_list.append(T.copy())  # (N_frame, 4, 4)
        pos = T[:, :3, 3].copy()
        link_pos_list.append(pos)  # (N_frame, 3)
        # 6D rotation representation: first two columns of the rotation matrix.
        ori = T[:, :3, :2].copy()
        link_ori_list.append(ori.reshape(-1, 6))  # (N_frame, 6)
        # Finite-difference linear velocity; frame 0 has no predecessor -> zero.
        vel = np.zeros((T.shape[0], 3), dtype=np.float32)
        vel[1:] = (pos[1:] - pos[:-1]) * fps
        link_vel_list.append(vel)  # (N_frame, 3)
        # Angular velocity from the relative rotation between consecutive
        # frames, converted to axis-angle and scaled by fps.
        ang_vel = np.zeros((T.shape[0], 3), dtype=np.float32)
        rel_rot = T[1:, :3, :3] @ np.linalg.inv(T[:-1, :3, :3])
        ang_vel[1:] = (
            quat_to_rotvec(quat_from_matrix(torch.from_numpy(rel_rot))) * fps
        ).detach().cpu().numpy()
        link_ang_vel_list.append(ang_vel)  # (N_frame, 3)

    # Keep only the first 20 links, then move the frame axis to the front.
    link_pose = np.float32(link_pose_list)[:20].transpose(1, 0, 2, 3)  # (N_frame, 20, 4, 4)
    link_pos = np.float32(link_pos_list)[:20].transpose(1, 0, 2)  # (N_frame, 20, 3)
    link_ori = np.float32(link_ori_list)[:20].transpose(1, 0, 2)  # (N_frame, 20, 6)
    link_vel = np.float32(link_vel_list)[:20].transpose(1, 0, 2)  # (N_frame, 20, 3)
    link_ang_vel = np.float32(link_ang_vel_list)[:20].transpose(1, 0, 2)  # (N_frame, 20, 3)

    one_target_jt_vel = np.zeros_like(one_target_jt_pos)
    one_target_jt_vel[1:] = (one_target_jt_pos[1:] - one_target_jt_pos[:-1]) * fps

    one_target_jt_pos = torch.from_numpy(one_target_jt_pos).to(device)
    target_jt_pos = one_target_jt_pos.unsqueeze(0)
    one_target_jt_vel = torch.from_numpy(one_target_jt_vel).to(device)
    target_jt_vel = one_target_jt_vel.unsqueeze(0)

    # Plain int for shape checks (the original compared against a 1-element
    # long tensor, relying on implicit tensor->bool conversion).
    n_frames = int(one_target_jt_pos.shape[0])
    assert target_jt_pos.shape == (1, n_frames, 19)
    assert target_jt_vel.shape == (1, n_frames, 19)

    if one_target_root_rotation is not None:
        one_target_root_rotation = torch.from_numpy(one_target_root_rotation).to(device)
        target_root_rotation = one_target_root_rotation.unsqueeze(0)
        assert target_root_rotation.shape == (1, n_frames, 3)
    else:
        target_root_rotation = None
    if one_target_root_translation is not None:
        one_target_root_translation = torch.from_numpy(one_target_root_translation).to(device)
        target_root_translation = one_target_root_translation.unsqueeze(0)
        assert target_root_translation.shape == (1, n_frames, 3)
    else:
        target_root_translation = None

    # Already sliced to 20 links above, so no second [:, :20] is needed.
    target_link_global_pose = torch.from_numpy(link_pose).unsqueeze(0).to(device)
    target_link_global_pos = torch.from_numpy(link_pos).unsqueeze(0).to(device)
    target_link_global_ori = torch.from_numpy(link_ori).unsqueeze(0).to(device)
    target_link_global_vel = torch.from_numpy(link_vel).unsqueeze(0).to(device)
    target_link_global_ang_vel = torch.from_numpy(link_ang_vel).unsqueeze(0).to(device)

    target_all_info = {
        "target_jt_pos": target_jt_pos,  # (1, N_frame, 19)
        "target_jt_vel": target_jt_vel,  # (1, N_frame, 19)
        "target_root_rotation": target_root_rotation,  # (1, N_frame, 3)
        "target_root_translation": target_root_translation,  # (1, N_frame, 3)
        "target_link_global_pose": target_link_global_pose,  # (1, N_frame, 20, 4, 4)
        "target_link_global_pos": target_link_global_pos,  # (1, N_frame, 20, 3)
        "target_link_global_ori": target_link_global_ori,  # (1, N_frame, 20, 6)
        "target_link_global_vel": target_link_global_vel,  # (1, N_frame, 20, 3)
        "target_link_global_ang_vel": target_link_global_ang_vel,  # (1, N_frame, 20, 3)
    }

    # Center xy on the first frame's root link. Work on a copy: the tensor
    # above was built with torch.from_numpy, which shares memory with the
    # numpy array, and .to(device) is a no-op copy on a CPU device — an
    # in-place subtraction would silently mutate target_link_global_pos.
    centered_pos = link_pos.copy()
    centered_pos[:, :, :2] -= centered_pos[0, 0, :2].copy()
    N = centered_pos.shape[0]
    export_data = {
        "motion_0": {
            "global": np.concatenate(
                (
                    centered_pos.reshape(N, 60),
                    link_ori.reshape(N, 120),
                    link_vel.reshape(N, 60),
                    link_ang_vel.reshape(N, 60),
                ),
                axis=1,
            ),
            "jt": np.concatenate(
                (
                    one_target_jt_pos.detach().cpu().numpy(),
                    one_target_jt_vel.detach().cpu().numpy(),
                ),
                axis=1,
            ),
        },
    }
    # Context manager closes the file even if pickling fails (the original
    # leaked the handle).
    with open(save_path, "wb") as f:
        pickle.dump(export_data, f)
    return target_all_info


if __name__ == "__main__":
    # Example conversion of a single CORE4D retargeted H1 motion.
    npz_path = "/home/liuyun/Humanoid_IL_Benchmark/retargeting/CORE4D_retargeted_data/20231003_2_044/h1_kinematic_motions/20231003_2_044_data.npz"
    out_pkl = "./1motion.pkl"
    process(npz_path, out_pkl)
