import os
from os.path import join, isdir
import numpy as np
import pickle
import torch
from transforms3d.axangles import mat2axangle
from legged_gym.utils.diff_quat import quat_from_matrix, quat_to_rotvec, quat_to_matrix, vec6d_to_matrix


def load_target_jt_amass(device, file, offset, fps=10, target_num_dof=19, dynamic_object=False, paste_last_motion=0):
    """Load AMASS-derived reference motions from a joblib file into padded tensors.

    Two on-disk layouts are handled: if the path contains 'recycle' the file is a
    list of dicts with keys 'jt_B'/'global_B'; otherwise it is a dict of named
    motions with keys 'jt'/'global', of which only "motion_1" is kept.

    NOTE(review): `offset`, `fps`, `target_num_dof`, `dynamic_object` and
    `paste_last_motion` are accepted for signature parity with the other loaders
    in this file but are ignored here; the joint count is hard-coded to 19 and
    the link count to 20.

    Per-frame layout of the `global` buffer, as implied by the reshapes below:
      [0:60]    20 link positions (20x3); [0:3] doubles as the root translation
      [60:180]  20 link orientations in 6D form (first two rotation-matrix
                columns); [60:66] doubles as the root orientation
      [180:240] 20 link linear velocities (20x3)
      [240:300] 20 link angular velocities (20x3)
    The `jt` buffer holds [0:19] joint positions and [19:38] joint velocities.

    Returns:
        target_jt_pos: float32 (N_seq, max_size, 19), zero-padded joint targets.
        size: int32 (N_seq,) valid frame counts per sequence.
            NOTE(review): the other loaders in this file return int64 sizes.
        target_all_info: dict of zero-padded float32 (N_seq, max_size, ...) tensors.
    """
    import joblib
    data_list = joblib.load(file)
    target_jt = []
    target_global = []
    target_length = []
    if 'recycle' in file:
        for data in data_list:
            if data is None:
                continue
            one_target_jt = torch.from_numpy(data['jt_B'])#.to(device)
            one_target_global = torch.from_numpy(data['global_B'])#.to(device)
            target_jt.append(one_target_jt)
            target_global.append(one_target_global)
            target_length.append(one_target_jt.shape[0])
    else:
        for name, data in data_list.items():
            if name != "motion_1":
                continue
            one_target_jt = torch.from_numpy(data['jt'])#.to(device)
            one_target_global = torch.from_numpy(data['global'])#.to(device)
            target_jt.append(one_target_jt)
            target_global.append(one_target_global)
            target_length.append(one_target_jt.shape[0])
    
    size = torch.tensor(target_length).to(dtype=torch.int32).to(device)
    max_size = size.max()

    # Zero-pad every sequence up to the longest one.
    target_jt_pos = torch.zeros(size.shape[0], max_size, 19).to(dtype=torch.float32).to(device)
    for i, one_target_jt in enumerate(target_jt):
        target_jt_pos[i, :size[i]] = one_target_jt[:, :19].clone()
    target_all_info = {
        "target_jt_pos": target_jt_pos,  # (1, N_frame, 10/19)
        "target_jt_vel": torch.zeros(target_jt_pos.shape).to(dtype=torch.float32).to(device),  # (1, N_frame, 10/19)
        "target_root_rotation": torch.zeros(size.shape[0], max_size, 3).to(dtype=torch.float32).to(device),  # (1, N_frame, 3)
        "target_root_translation": torch.zeros(size.shape[0], max_size, 3).to(dtype=torch.float32).to(device),  # (1, N_frame, 3)
        "target_link_global_pose": torch.zeros(size.shape[0], max_size, 20, 4, 4).to(dtype=torch.float32).to(device),  # (1, N_frame, 11/20, 4, 4)
        "target_link_global_pos": torch.zeros(size.shape[0], max_size, 20, 3).to(dtype=torch.float32).to(device),  # (1, N_frame, 11/20, 3)
        "target_link_global_ori": torch.zeros(size.shape[0], max_size, 20, 6).to(dtype=torch.float32).to(device),  # (1, N_frame, 11/20, 6)
        "target_link_global_vel": torch.zeros(size.shape[0], max_size, 20, 3).to(dtype=torch.float32).to(device),  # (1, N_frame, 11/20, 3)
        "target_link_global_ang_vel": torch.zeros(size.shape[0], max_size, 20, 3).to(dtype=torch.float32).to(device),  # (1, N_frame, 11/20, 3)
    }
    for i, one_target_jt in enumerate(target_jt):
        target_all_info["target_jt_vel"][i, :size[i]] = one_target_jt[:, 19:38].clone()
        target_all_info["target_root_translation"][i, :size[i]] = target_global[i][:, :3].clone()
        # Root orientation: 6D -> rotation matrix -> axis-angle, one frame at a
        # time (transforms3d's mat2axangle is not vectorized).
        root_Rs = vec6d_to_matrix(target_global[i][:, 60:66].reshape(-1, 3, 2))
        for frame_idx in range(size[i]):
            axis, angle = mat2axangle(root_Rs[frame_idx].detach().cpu().numpy())
            target_all_info["target_root_rotation"][i, frame_idx] = torch.from_numpy(axis * angle).to(dtype=torch.float32).to(device)
        # Assemble homogeneous 4x4 link poses from the 6D orientations and positions.
        target_all_info["target_link_global_pose"][i, :size[i], :, :3, :3] = vec6d_to_matrix(target_global[i][:, 60:180].reshape(-1, 20, 3, 2)).clone()
        target_all_info["target_link_global_pose"][i, :size[i], :, :3, 3] = target_global[i][:, :60].reshape(-1, 20, 3).clone()
        target_all_info["target_link_global_pose"][i, :size[i], :, 3, 3] = 1
        target_all_info["target_link_global_pos"][i, :size[i]] = target_global[i][:, :60].reshape(-1, 20, 3).clone()
        target_all_info["target_link_global_ori"][i, :size[i]] = target_global[i][:, 60:180].reshape(-1, 20, 6).clone()
        target_all_info["target_link_global_vel"][i, :size[i]] = target_global[i][:, 180:240].reshape(-1, 20, 3).clone()
        target_all_info["target_link_global_ang_vel"][i, :size[i]] = target_global[i][:, 240:300].reshape(-1, 20, 3).clone()

    return target_jt_pos, size, target_all_info


def load_target_jt_one_motion(device, file, offset, fps=10, target_num_dof=19, dynamic_object=False, paste_last_motion=0, need_task_target_hand_positions=False):
    """Load a single reference motion file and build per-frame target tensors.

    Two input formats are supported:
      * ``.npy``  -- a plain (N_frame, >=19) joint-angle array; all root/link
        global entries of the returned info dict are then ``None``.
      * ``.npz``  -- a pickled dict (under key ``"arr_0"``) holding joint
        angles, root rotation/translation, per-link 4x4 global poses and,
        optionally, object poses and task hand-position targets.

    Args:
        device: torch device all returned tensors are moved to.
        file: path to the ``.npy``/``.npz`` motion file.
        offset: (1, 10) or (1, 19) joint-angle offset added to the loaded
            joint positions; a (1, 10) offset is zero-padded to 19 columns.
        fps: frame rate used to finite-difference velocities.
        target_num_dof: 10 or 19; extra joint/link columns are dropped.
        dynamic_object: also return object pose/velocity targets (requires
            ``object_poses`` in the ``.npz``).
        paste_last_motion: if > 0, repeat the final frame this many times at
            the end of every sequence (velocity targets are zeroed there).
        need_task_target_hand_positions: also return per-frame task hand
            position targets (requires ``task_target_hand_positions``).

    Returns:
        (target_jt_pos, size, target_all_info): target_jt_pos is
        (1, N_frame, target_num_dof) on `device`, size is a length-1 long
        tensor, and target_all_info maps target names to (1, N_frame, ...)
        tensors (entries unavailable from a ``.npy`` input are ``None``).
    """
    if file.endswith(".npy"):
        one_target_jt_pos = np.load(file).astype(np.float32)
        # Only joint angles are available from .npy; all global data stays None.
        one_target_root_rotation = None
        one_target_root_translation = None
        one_target_link_global_pose = None
        one_target_link_global_pos = None
        one_target_link_global_ori = None
        one_target_link_global_vel = None
        one_target_link_global_ang_vel = None
    elif file.endswith(".npz"):
        data = np.load(file, allow_pickle=True)["arr_0"].item()
        one_target_jt_pos = data["joint_angles"].astype(np.float32)

        one_target_root_rotation = data["global_rotations"].astype(np.float32)
        one_target_root_translation = data["global_translations"].astype(np.float32)

        one_target_link_global_pose = []
        one_target_link_global_pos = []
        one_target_link_global_ori = []
        one_target_link_global_vel = []
        one_target_link_global_ang_vel = []
        for link_name in data["link_global_poses"]:
            T = data["link_global_poses"][link_name].astype(np.float32)  # (N_frame, 4, 4)
            one_target_link_global_pose.append(T.copy())  # (N_frame, 4, 4)
            pos = T[:, :3, 3].copy()
            one_target_link_global_pos.append(pos)  # (N_frame, 3)
            ori = T[:, :3, :2].copy()  # first two rotation columns -> 6D representation
            one_target_link_global_ori.append(ori.reshape(-1, 6))  # (N_frame, 6)
            vel = np.zeros((T.shape[0], 3))
            vel[1:] = (pos[1:] - pos[:-1]) * fps  # finite-difference linear velocity
            one_target_link_global_vel.append(vel)  # (N_frame, 3)
            ang_vel = np.zeros((T.shape[0], 3))
            # Relative rotation between consecutive frames -> rotvec -> angular velocity.
            ang_vel[1:] = (quat_to_rotvec(quat_from_matrix(torch.from_numpy(T[1:, :3, :3] @ np.linalg.inv(T[:-1, :3, :3])))) * fps).detach().cpu().numpy()
            one_target_link_global_ang_vel.append(ang_vel)  # (N_frame, 3)
        # Stack over links, keep the first 20 links, then move the frame axis first.
        one_target_link_global_pose = np.float32(one_target_link_global_pose)[:20].transpose(1, 0, 2, 3)  # (N_frame, 20, 4, 4)
        one_target_link_global_pos = np.float32(one_target_link_global_pos)[:20].transpose(1, 0, 2)  # (N_frame, 20, 3)
        one_target_link_global_ori = np.float32(one_target_link_global_ori)[:20].transpose(1, 0, 2)  # (N_frame, 20, 6)
        one_target_link_global_vel = np.float32(one_target_link_global_vel)[:20].transpose(1, 0, 2)  # (N_frame, 20, 3)
        one_target_link_global_ang_vel = np.float32(one_target_link_global_ang_vel)[:20].transpose(1, 0, 2)  # (N_frame, 20, 3)

        if dynamic_object:
            object_poses = data["object_poses"].astype(np.float32)  # (N_frame, 4, 4)
            target_object_translation = torch.from_numpy(object_poses[:, :3, 3]).unsqueeze(0).to(dtype=torch.float32).to(device)  # (1, N_frame, 3)
            target_object_vel = torch.zeros(target_object_translation.shape).to(dtype=torch.float32).to(device)
            target_object_vel[:, 1:] = (target_object_translation[:, 1:] - target_object_translation[:, :-1]) * fps
            object_rotations = torch.from_numpy(object_poses[:, :3, :3]).to(dtype=torch.float32).to(device)  # (N_frame, 3, 3)
            target_object_rotation = quat_from_matrix(object_rotations).unsqueeze(0)  # (1, N_frame, 4)
            target_object_ang_vel = torch.zeros(target_object_translation.shape).to(dtype=torch.float32).to(device)
            target_object_ang_vel[:, 1:] = (quat_to_rotvec(quat_from_matrix(torch.from_numpy(object_poses[1:, :3, :3] @ np.linalg.inv(object_poses[:-1, :3, :3])))) * fps).unsqueeze(0).to(dtype=torch.float32).to(device)
        if need_task_target_hand_positions:
            N_frame = one_target_link_global_pose.shape[0]
            # NOTE(review): unlike every other returned tensor, this one is not
            # moved to `device`; confirm downstream really expects it on CPU.
            task_target_hand_positions = torch.from_numpy(data["task_target_hand_positions"].astype(np.float32)).unsqueeze(0).unsqueeze(0).repeat(1, N_frame, 1, 1)  # (1, N_frame, 2, 3)

    # Joint velocities by finite differences (first frame stays zero).
    one_target_jt_vel = np.zeros(one_target_jt_pos.shape).astype(np.float32)
    one_target_jt_vel[1:] = (one_target_jt_pos[1:] - one_target_jt_pos[:-1]) * fps

    one_target_jt_pos = torch.from_numpy(one_target_jt_pos).to(device)
    target_jt_pos = one_target_jt_pos.unsqueeze(0)
    # Zero-pad a 10-dof offset up to the 19 loaded joint columns.
    if offset.shape[1] == 10:
        delta = torch.cat((offset, torch.zeros((1, 9), dtype=torch.float32, device=offset.device, requires_grad=False)), dim=1)
    elif offset.shape[1] == 19:
        delta = offset
    else:
        raise NotImplementedError
    target_jt_pos += delta
    one_target_jt_vel = torch.from_numpy(one_target_jt_vel).to(device)
    target_jt_vel = one_target_jt_vel.unsqueeze(0)

    # remove redundant joints
    assert (target_num_dof == 10) or (target_num_dof == 19)
    target_jt_pos = target_jt_pos[..., :target_num_dof]
    target_jt_vel = target_jt_vel[..., :target_num_dof]

    size = torch.tensor([one_target_jt_pos.shape[0]]).to(dtype=torch.long).to(device)
    assert target_jt_pos.shape == (1, size.item(), target_num_dof)
    assert target_jt_vel.shape == (1, size.item(), target_num_dof)

    if one_target_root_rotation is not None:
        one_target_root_rotation = torch.from_numpy(one_target_root_rotation).to(device)
        target_root_rotation = one_target_root_rotation.unsqueeze(0)
        assert target_root_rotation.shape == (1, size.item(), 3)
    else:
        target_root_rotation = None
    if one_target_root_translation is not None:
        one_target_root_translation = torch.from_numpy(one_target_root_translation).to(device)
        target_root_translation = one_target_root_translation.unsqueeze(0)
        assert target_root_translation.shape == (1, size.item(), 3)
    else:
        target_root_translation = None

    if one_target_link_global_pose is not None:
        # Keep the root link plus the first target_num_dof links.
        target_link_global_pose = torch.from_numpy(one_target_link_global_pose)[:, :target_num_dof+1].unsqueeze(0).to(device)
        target_link_global_pos = torch.from_numpy(one_target_link_global_pos)[:, :target_num_dof+1].unsqueeze(0).to(device)
        target_link_global_ori = torch.from_numpy(one_target_link_global_ori)[:, :target_num_dof+1].unsqueeze(0).to(device)
        target_link_global_vel = torch.from_numpy(one_target_link_global_vel)[:, :target_num_dof+1].unsqueeze(0).to(device)
        target_link_global_ang_vel = torch.from_numpy(one_target_link_global_ang_vel)[:, :target_num_dof+1].unsqueeze(0).to(device)
    else:
        # BUG FIX: the original tuple assignment set `target_jt_vel = None` here
        # (clobbering the joint velocities computed above) and never defined
        # `target_link_global_vel`, causing a NameError for .npy inputs.
        target_link_global_pose = None
        target_link_global_pos = None
        target_link_global_ori = None
        target_link_global_vel = None
        target_link_global_ang_vel = None

    target_all_info = {
        "target_jt_pos": target_jt_pos,  # (1, N_frame, 10/19)
        "target_jt_vel": target_jt_vel,  # (1, N_frame, 10/19)
        "target_root_rotation": target_root_rotation,  # (1, N_frame, 3)
        "target_root_translation": target_root_translation,  # (1, N_frame, 3)
        "target_link_global_pose": target_link_global_pose,  # (1, N_frame, 11/20, 4, 4)
        "target_link_global_pos": target_link_global_pos,  # (1, N_frame, 11/20, 3)
        "target_link_global_ori": target_link_global_ori,  # (1, N_frame, 11/20, 6)
        "target_link_global_vel": target_link_global_vel,  # (1, N_frame, 11/20, 3)
        "target_link_global_ang_vel": target_link_global_ang_vel,  # (1, N_frame, 11/20, 3)
    }
    if dynamic_object:
        target_all_info["target_object_translation"] = target_object_translation  # (1, N_frame, 3)
        target_all_info["target_object_rotation"] = target_object_rotation  # (1, N_frame, 4)
        target_all_info["target_object_vel"] = target_object_vel  # (1, N_frame, 3)
        target_all_info["target_object_ang_vel"] = target_object_ang_vel  # (1, N_frame, 3)
    if need_task_target_hand_positions:
        target_all_info["task_target_hand_positions"] = task_target_hand_positions  # (1, N_frame, 2, 3)

    if paste_last_motion:
        # Repeat the final frame `paste_last_motion` times along the frame axis.
        repeat_times = [1] * len(target_jt_pos.shape)
        repeat_times[1] = paste_last_motion
        target_jt_pos = torch.cat((target_jt_pos.clone(), target_jt_pos[:, -1:].clone().repeat(repeat_times)), dim=1)
        size += paste_last_motion
        for key in target_all_info:
            if target_all_info[key] is None:  # .npy inputs have no global targets
                continue
            repeat_times = [1] * len(target_all_info[key].shape)
            repeat_times[1] = paste_last_motion
            target_all_info[key] = torch.cat((target_all_info[key].clone(), target_all_info[key][:, -1:].clone().repeat(repeat_times)), dim=1)
        for key in target_all_info:
            # Pasted frames are static, so their velocity targets must be zero.
            if target_all_info[key] is not None and "_vel" in key:
                target_all_info[key][:, -paste_last_motion:] = 0

    return target_jt_pos, size, target_all_info


def load_target_jt_one_scene_multiple_motions(device, file_dir, offset, fps=10, motion_speeds=[1], target_num_dof=19, dynamic_object=False, paste_last_motion=0, need_task_target_hand_positions=False, max_frame_number=None):
    """Load every .npz motion under <file_dir>/h1_kinematic_motions into one batch.

    Each motion is loaded via `load_target_jt_one_motion` and then subsampled
    once per entry of `motion_speeds` (stride-based data augmentation). All
    resulting sequences are zero-padded to the longest length and stacked.

    Returns:
        target_jt_pos: float32 (N_seq, max_size, target_num_dof)
        size: int64 (N_seq,) valid frame counts
        target_all_info: dict of float32 (N_seq, max_size, ...) tensors
        or (None, None, None) when the directory holds no .npz files.
    """
    info_keys = [
        "target_jt_pos", "target_jt_vel",
        "target_root_rotation", "target_root_translation",
        "target_link_global_pose", "target_link_global_pos",
        "target_link_global_ori", "target_link_global_vel",
        "target_link_global_ang_vel",
    ]
    if dynamic_object:
        info_keys += ["target_object_translation", "target_object_rotation",
                      "target_object_vel", "target_object_ang_vel"]
    if need_task_target_hand_positions:
        info_keys.append("task_target_hand_positions")
    per_key_sequences = {key: [] for key in info_keys}
    jt_pos_sequences, lengths = [], []

    motion_dir = join(file_dir, "h1_kinematic_motions")
    assert isdir(motion_dir), motion_dir
    motion_files = sorted(name for name in os.listdir(motion_dir) if name.endswith(".npz"))
    for motion_file in motion_files:
        seq_jt_pos, seq_size, seq_info = load_target_jt_one_motion(
            device, join(motion_dir, motion_file), offset, fps,
            target_num_dof=target_num_dof, dynamic_object=dynamic_object,
            paste_last_motion=paste_last_motion,
            need_task_target_hand_positions=need_task_target_hand_positions)
        for speed in motion_speeds:  # stride subsampling as data augmentation
            jt_pos_sequences.append(seq_jt_pos[0, ::speed])
            lengths.append((seq_size[0].item() - 1) // speed + 1)
            for key in per_key_sequences:
                per_key_sequences[key].append(seq_info[key][0, ::speed])

    if not lengths:
        return None, None, None

    size = torch.tensor(lengths).to(dtype=torch.long).to(device)
    n_seq = size.shape[0]
    max_size = size.max().item()

    # Zero-padded batch tensors.
    target_jt_pos = torch.zeros(n_seq, max_size, jt_pos_sequences[0].shape[-1]).to(dtype=torch.float32).to(device)
    target_all_info = {
        key: torch.zeros((n_seq, max_size) + seqs[0].shape[1:]).to(dtype=torch.float32).to(device)
        for key, seqs in per_key_sequences.items()
    }

    for idx, valid in enumerate(size.tolist()):
        target_jt_pos[idx, :valid] = jt_pos_sequences[idx].clone().to(dtype=torch.float32).to(device)
        for key in target_all_info:
            target_all_info[key][idx, :valid] = per_key_sequences[key][idx].clone().to(dtype=torch.float32).to(device)

    if max_frame_number is not None:
        # Truncate every sequence to at most max_frame_number frames.
        target_jt_pos = target_jt_pos[:, :max_frame_number]
        size = size.clamp(None, max_frame_number)
        for key in target_all_info:
            target_all_info[key] = target_all_info[key][:, :max_frame_number]

    return target_jt_pos, size, target_all_info


def load_target_jt_multiple_scene_multiple_motions(device, file_dir, scene_names, offset, fps=10, motion_speeds=[1], target_num_dof=19, dynamic_object=False, paste_last_motion=0, need_task_target_hand_positions=False, max_frame_number=None):
    """Aggregate the motions of several scenes into one zero-padded batch.

    Returns:
        target_jt_pos: torch.float32, shape = (N_seq, max_size, 19)
        size: torch.long, shape = (N_seq,)
        target_all_info: dict, each value is torch.float32 with shape (N_seq, max_size, ...)
        scene_motion_range: list, one [motion_index_min, motion_index_max + 1]
            entry per scene, indexing rows of the stacked batch.
    """
    info_keys = [
        "target_jt_pos", "target_jt_vel",
        "target_root_rotation", "target_root_translation",
        "target_link_global_pose", "target_link_global_pos",
        "target_link_global_ori", "target_link_global_vel",
        "target_link_global_ang_vel",
    ]
    if dynamic_object:
        info_keys += ["target_object_translation", "target_object_rotation",
                      "target_object_vel", "target_object_ang_vel"]
    if need_task_target_hand_positions:
        info_keys.append("task_target_hand_positions")

    per_scene_jt_pos, per_scene_sizes, per_scene_max, scene_motion_range = [], [], [], []
    per_scene_info = {key: [] for key in info_keys}
    seq_cursor = 0

    for scene_name in scene_names:
        scene_jt_pos, scene_size, scene_info = load_target_jt_one_scene_multiple_motions(
            device, join(file_dir, scene_name), offset, fps,
            motion_speeds=motion_speeds, target_num_dof=target_num_dof,
            dynamic_object=dynamic_object, paste_last_motion=paste_last_motion,
            need_task_target_hand_positions=need_task_target_hand_positions,
            max_frame_number=max_frame_number)
        assert scene_jt_pos is not None
        print("[load_target_jt_multiple_scene_multiple_motions] finish loading {}, sequence number = {}, max_size = {}".format(join(file_dir, scene_name), scene_jt_pos.shape[0], scene_size.max().item()))
        n_scene_seq = scene_jt_pos.shape[0]
        scene_motion_range.append([seq_cursor, seq_cursor + n_scene_seq])
        per_scene_jt_pos.append(scene_jt_pos)
        per_scene_sizes.append(scene_size)
        per_scene_max.append(scene_size.max().item())
        for key in per_scene_info:
            per_scene_info[key].append(scene_info[key])
        seq_cursor += n_scene_seq

    size = torch.cat(per_scene_sizes).to(dtype=torch.long).to(device)
    N_seq = size.shape[0]
    max_size = size.max().item()

    # Zero-padded batch tensors sized for the longest sequence over all scenes.
    target_jt_pos = torch.zeros(N_seq, max_size, per_scene_jt_pos[0].shape[-1]).to(dtype=torch.float32).to(device)
    target_all_info = {
        key: torch.zeros((N_seq, max_size) + tensors[0].shape[2:]).to(dtype=torch.float32).to(device)
        for key, tensors in per_scene_info.items()
    }

    # Copy each scene's block of rows into the stacked batch.
    for scene_idx, (lo, hi) in enumerate(scene_motion_range):
        target_jt_pos[lo:hi, :per_scene_max[scene_idx]] = per_scene_jt_pos[scene_idx].clone().to(dtype=torch.float32).to(device)
        for key in target_all_info:
            target_all_info[key][lo:hi, :per_scene_max[scene_idx]] = per_scene_info[key][scene_idx].clone().to(dtype=torch.float32).to(device)

    return target_jt_pos, size, target_all_info, scene_motion_range


def load_target_jt_tracked_motions(device, pkl_path, offset, fps=10, target_num_dof=19):
    """Load tracker-input motion sequences from a pickle file into padded tensors.

    Each element of the pickled list is a dict with:
      * "scene_mesh_path": path to the scene mesh for that sequence.
      * "tracker_inputs": (N_raw, 399) per-frame feature array, laid out as
        (with D = target_num_dof, as implied by the slices below):
          [0 : D]              joint positions
          [D : 2D]             joint velocities
          [2D : 5D+3]          (D+1) link positions in the root frame
          [5D+3 : 11D+9]       (D+1) link 6D orientations in the root frame
          [17D+15 : 17D+18]    root translation (world frame)
          [17D+18 : 17D+22]    root orientation quaternion, xyzw (world frame)
        (the remaining columns are unused here).

    Args:
        device: torch device for all returned tensors.
        pkl_path: path to the pickle file.
        offset: joint-angle offset, reshaped to (1, target_num_dof) and added
            to the loaded joint positions.
        fps: target frame rate; the stream is subsampled by 50 // fps and
            velocities are finite-differenced at `fps`.
        target_num_dof: number of actuated joints; fixes all slice offsets.

    Returns:
        (target_jt_pos, size, target_all_info, scene_motion_range,
        scene_mesh_paths), with shapes mirroring the other loaders in this file.
    """
    # NOTE(security): pickle.load can execute arbitrary code -- only load trusted files.
    # FIX: the original `pickle.load(open(pkl_path, "rb"))` leaked the file handle.
    with open(pkl_path, "rb") as f:
        data = pickle.load(f)

    target_jt_pos_list, size_list, scene_motion_range, scene_mesh_paths = [], [], [], []
    target_all_info_list = {
        "target_jt_pos": [],
        "target_jt_vel": [],
        "target_root_rotation": [],
        "target_root_translation": [],
        "target_link_global_pose": [],
        "target_link_global_pos": [],
        "target_link_global_ori": [],
        "target_link_global_vel": [],
        "target_link_global_ang_vel": [],
    }

    offset_np = offset.reshape(1, target_num_dof).detach().cpu().numpy()

    for i, seq in enumerate(data):
        scene_mesh_paths.append(seq["scene_mesh_path"])
        scene_motion_range.append([i, i+1])  # one motion per scene here

        # Subsample down to `fps`; assumes the raw tracker stream is 50 Hz and
        # that fps divides 50 -- TODO confirm the source rate.
        seq_input = seq["tracker_inputs"][::(50//fps)]  # (N, 399)
        assert seq_input.shape[1] == 399
        N = seq_input.shape[0]
        size_list.append(N)

        jt_pos = seq_input[:, :target_num_dof].copy() + offset_np
        target_jt_pos_list.append(jt_pos)
        target_all_info_list["target_jt_pos"].append(jt_pos)
        target_all_info_list["target_jt_vel"].append(seq_input[:, target_num_dof:target_num_dof*2].copy())

        root_ori_xyzw = seq_input[:, target_num_dof*17+18:target_num_dof*17+22].copy()  # (N, 4)
        root_ori = quat_to_rotvec(torch.from_numpy(root_ori_xyzw)).detach().cpu().numpy()  # (N, 3)
        target_all_info_list["target_root_rotation"].append(root_ori)
        target_all_info_list["target_root_translation"].append(seq_input[:, target_num_dof*17+15:target_num_dof*17+18].copy())

        # Homogeneous root->world transforms, one per frame.
        root2world = np.eye(4).reshape(1, 4, 4).repeat(N, axis=0)
        root2world[:, :3, :3] = quat_to_matrix(torch.from_numpy(root_ori_xyzw)).detach().cpu().numpy()
        root2world[:, :3, 3] = seq_input[:, target_num_dof*17+15:target_num_dof*17+18].copy()

        # Link positions are stored root-relative; transform to world.
        # Row-vector convention: p_world = p_root @ R^T + t.
        link_pos_to_root = seq_input[:, target_num_dof*2:target_num_dof*5+3].reshape(N, target_num_dof+1, 3).copy()
        link_pos_to_world = (link_pos_to_root @ root2world[:, :3, :3].transpose(0, 2, 1)) + root2world[:, :3, 3].reshape(N, 1, 3)
        target_all_info_list["target_link_global_pos"].append(link_pos_to_world)  # (N, 20, 3)
        link_ori_to_root = vec6d_to_matrix(torch.from_numpy(seq_input[:, target_num_dof*5+3:target_num_dof*11+9].reshape(N, target_num_dof+1, 3, 2))).detach().cpu().numpy()  # (N, 20, 3, 3)
        link_ori_to_world = root2world[:, :3, :3].reshape(N, 1, 3, 3) @ link_ori_to_root
        target_all_info_list["target_link_global_ori"].append(link_ori_to_world[:, :, :, :2].reshape(N, target_num_dof+1, 6))  # (N, 20, 6)

        # Assemble homogeneous 4x4 world poses per link.
        link_pose_to_world = np.eye(4).reshape(1, 1, 4, 4).repeat(N, axis=0).repeat(target_num_dof+1, axis=1)
        link_pose_to_world[:, :, :3, :3] = link_ori_to_world.copy()
        link_pose_to_world[:, :, :3, 3] = link_pos_to_world.copy()
        target_all_info_list["target_link_global_pose"].append(link_pose_to_world)  # (N, 20, 4, 4)

        # Finite-difference link velocities (first frame stays zero).
        vel = np.zeros((N, target_num_dof+1, 3))
        vel[1:] = (link_pos_to_world[1:] - link_pos_to_world[:-1]) * fps
        target_all_info_list["target_link_global_vel"].append(vel)  # (N, 20, 3)
        ang_vel = np.zeros((N, target_num_dof+1, 3))
        # Relative rotation between consecutive frames -> rotvec -> angular velocity.
        ang_vel[1:] = (quat_to_rotvec(quat_from_matrix(torch.from_numpy(link_ori_to_world[1:] @ np.linalg.inv(link_ori_to_world[:-1]))).reshape((N-1) * (target_num_dof+1), 4)).reshape(N-1, target_num_dof+1, 3) * fps).detach().cpu().numpy()
        target_all_info_list["target_link_global_ang_vel"].append(ang_vel)  # (N, 20, 3)

    size = torch.tensor(size_list).to(dtype=torch.long).to(device)
    N_seq = size.shape[0]
    max_size = size.max().item()

    # Zero-pad all sequences up to the longest one.
    target_jt_pos = torch.zeros(N_seq, max_size, target_jt_pos_list[0].shape[-1]).to(dtype=torch.float32).to(device)
    target_all_info = {}
    for key in target_all_info_list:
        target_all_info[key] = torch.zeros((N_seq, max_size) + target_all_info_list[key][0].shape[1:]).to(dtype=torch.float32).to(device)

    for i in range(N_seq):
        target_jt_pos[i, :size[i]] = torch.from_numpy(target_jt_pos_list[i]).to(dtype=torch.float32).to(device)
        for key in target_all_info:
            target_all_info[key][i, :size[i]] = torch.from_numpy(target_all_info_list[key][i]).to(dtype=torch.float32).to(device)

    return target_jt_pos, size, target_all_info, scene_motion_range, scene_mesh_paths
