import os
from os.path import join, abspath, dirname, isfile, isdir
import sys
sys.path.append(join(dirname(abspath(__file__)), "../../.."))
import numpy as np
import pickle
from torch.utils.data import Dataset, IterableDataset, random_split
import torch
import pickle
import trimesh
import copy
import json
from transforms3d.quaternions import mat2quat
from dataset_statistics.info import ACTION_INFO, ACTION_DICT, N_ACTION, OBJ_CATEGORY, OBJ_CATEGORY_DICT, N_CATEGORY
from dataset_statistics.load_sequence_names import txt_to_sequence_names
from utils.time_align import load_sequence_names
from utils.parse_NOKOV import get_obj_names, get_obj_model_paths
from utils.parse_object import load_obj_mesh
from utils.hand import mano_params_to_hand_info
from utils.triplet_utils import txt2triplets
from visualization.visualize_HO_poses import visualize_HO_poses
from manopth.manopth.rodrigues_layer import batch_rodrigues
import open3d as o3d


def mat2xyzw(R):
    """Convert a 3x3 rotation matrix to a quaternion in (x, y, z, w) order.

    The sign is canonicalized so that the scalar component w is non-negative
    (q and -q encode the same rotation).
    """
    wxyz = mat2quat(R)  # transforms3d returns (w, x, y, z)
    if wxyz[0] < 0:
        wxyz = -wxyz
    x, y, z, w = wxyz[1], wxyz[2], wxyz[3], wxyz[0]
    return np.float32([x, y, z, w])


def T_to_7Dpose(T, order="xyzw"):
    """Flatten a 4x4 homogeneous transform into a 7D pose vector.

    Args:
        T: (4, 4) homogeneous transform.
        order: quaternion component order, "xyzw" (default) or "wxyz".
               In both cases the sign is canonicalized so w >= 0.

    Returns:
        (7,) array: [tx, ty, tz] followed by the quaternion in `order`.

    Raises:
        ValueError: if `order` is not "xyzw" or "wxyz" (previously this fell
        through and crashed with UnboundLocalError on `q`).
    """
    if order == "xyzw":
        q = mat2xyzw(T[:3, :3])
    elif order == "wxyz":
        q = mat2quat(T[:3, :3])
        if q[0] < 0:
            q = -q
    else:
        raise ValueError("unsupported quaternion order: {!r}".format(order))
    return np.concatenate((T[:3, 3], q), axis=0)


class BiToolDataset(Dataset):
    """Clip-level dataset of bimanual tool-use sequences (two hands + tool + target).

    Each item returned by __getitem__ is a list of 8 entries:
        0: hand skeletons            (L, 2, 21, 3) float32, order [right, left]
        1: object point clouds       (L, 2, N_point, 3) float32, order [tool, target]
        2: object 7D poses           (L, 2, 7) float32, [x, y, z, qx, qy, qz, qw]
        3: canonical object points   (2, N_point, 3) float32 (pose-free object models)
        4: sequence name (str), e.g. "20231006_217"
        5: object names, [tool_objname, target_objname]
        6: one-hot action feature    (N_ACTION,) float32
        7: one-hot category encoding (2, N_CATEGORY) float32 (all zeros unless cfg["encode_TO"])
    """

    def __init__(self, cfg):
        """
        cfg:
            * "mode": "train" / "test"
            * "testset_level": None/1/2/3/4 (test mode only; None merges all levels)
            * "save_filepath": path of the preprocessed .npz cache
              (in test mode: a dict mapping level name -> path)
            * "sequence_dirs": a list , an item is a dict:
                * "hand_pose_dir"
                * "object_pose_dir"
                * "object_model_paths": [tool_model_path, target_model_path]
                * "object_order": [tool_objname, target_objname]
              (in test mode: a dict mapping level name -> such a list)
            * "frame_rate": sample fps, commonly 10 Hz
            * "clip_time_range": the time interval for a clip, commonly 2s
            * "N_point": point number for each object model
            * "cvt_to_relative_pose": if True, convert poses to be relative to the
              anchor (middle) frame of each clip
            * "encode_TO": if True, provide one-hot category labels for tool and target
            * "resample_obj_points": if True, resample each clip's object point
              clouds on every __getitem__ call
            * "info_flag": "": keep both hands and both objects;
              "RT_only": right hand + tool only; "LO_only": left hand + target only
        """
        
        self.cfg = cfg
        mode = cfg["mode"]

        # Normalize to parallel lists of (cache path, sequence-dir list): one pair
        # in train mode, one per requested test level in test mode.
        if mode == "train":
            save_filepath_list = [cfg["save_filepath"]]
            sequence_dir_list = [cfg["sequence_dirs"]]
        else:
            if cfg["testset_level"] is None:
                save_filepath_list = [cfg["save_filepath"][level] for level in cfg["save_filepath"]]
                sequence_dir_list = [cfg["sequence_dirs"][level] for level in cfg["sequence_dirs"]]
            else:
                save_filepath_list = [cfg["save_filepath"][cfg["testset_level"]]]
                sequence_dir_list = [cfg["sequence_dirs"][cfg["testset_level"]]]
        
        self.sequences = []

        for (save_filepath, sequence_dir) in zip(save_filepath_list, sequence_dir_list):
            if not isfile(save_filepath):
                # No cache yet: preprocess from raw data and save the cache.
                # NOTE(review): this overwrites cfg["save_filepath"]/"sequence_dirs"
                # (also visible through self.cfg) -- confirm no caller relies on
                # the original dict values afterwards.
                cfg["save_filepath"] = save_filepath
                cfg["sequence_dirs"] = sequence_dir
                sequences = self.load_data(cfg)
                self.sequences += sequences
                np.savez(save_filepath, data=sequences)
            else:
                print("load from preprocessed files ...")
                sequences = np.load(save_filepath, allow_pickle=True)
                sequences = sequences["data"]

                # Re-cast numeric entries to float32 (the pickled cache may hold float64).
                sequences_final = []
                for i, seq in enumerate(sequences):
                    sequences_final.append((seq[0].astype(np.float32), seq[1].astype(np.float32), seq[2].astype(np.float32), seq[3].astype(np.float32), seq[4], seq[5], np.float32(seq[6])))
                self.sequences += sequences_final

        self.len = len(self.sequences)
        if self.cfg["mode"] == "train":
            # Train mode exposes only every 3rd clip (paired with idx * 3 in __getitem__).
            self.len = self.len // 3

        # statistics
        seq_name_dict = {}
        for seq in self.sequences:
            seq_name_dict[seq[4]] = 1
        print("dataset sequence number =", len(list(seq_name_dict.keys())))
        print("dataset clip number =", self.len)
    
    def extent_triplet_info(self, date):
        """Populate self.triplet_info with action labels for all videos of `date`.

        Parses every "*_record.txt" under the date directory (searched in both
        dataset roots) and maps "<date>_<video_idx:03d>" -> action label index.
        (Method name is presumably a typo for "extend".)
        """
        dataset_dirs = ["/share/datasets/HOI-mocap", "/data2/HOI-mocap"]
        for dataset_dir in dataset_dirs:
            date_dir = join(dataset_dir, date)
            if not isdir(date_dir):
                continue
            for fn in os.listdir(date_dir):
                if not fn.endswith("_record.txt"):
                    continue
                print("expanding triplet_info from {} ...".format(join(date_dir, fn)))
                triplets = txt2triplets(join(date_dir, fn))
                for triplet in triplets:
                    self.triplet_info[date + "_" + str(triplet["video_idx"]).zfill(3)] = ACTION_DICT[triplet["action_label"]]
        # print(self.triplet_info)
        
    def load_data(self, cfg):
        """Preprocess raw sequences into clip tuples (see class docstring).

        After all preprocessing the result is saved as one big cache file by
        __init__; later runs read that file directly with no further processing.
        """
        
        sequences = []
        
        # Raw data is 30 Hz; `stride` subsamples it down to cfg["frame_rate"].
        stride = 30 // cfg["frame_rate"]
        # NOTE(review): clip_time_range is only used by the commented-out variant
        # below; the active windowing hard-codes a 20-raw-frame window.
        clip_time_range = cfg["clip_time_range"]
        N_obj_point = cfg["N_point"]
        flag_cvt_to_relative_pose = cfg["cvt_to_relative_pose"]
        
        self.triplet_info = {}
        
        for sequence_idx, sequence_info in enumerate(cfg["sequence_dirs"]):
            
            print("processing {}".format(sequence_info["object_pose_dir"].split("/")[-2]))
            
            hand_pose_dir = sequence_info["hand_pose_dir"]
            obj_pose_dir = sequence_info["object_pose_dir"]
            obj_model_paths = sequence_info["object_model_paths"]
            obj_names = sequence_info["object_order"]  # [tool, target]
            tool_objname, target_objname = obj_names
            sequence_name = obj_pose_dir.split("/")[-2]  # example: "20231006_217"
            # Lazily load this date's action labels the first time a sequence of it appears.
            if not sequence_name in self.triplet_info:
                self.extent_triplet_info(sequence_name.split("_")[0])
            
            # One-hot action feature for the whole sequence.
            action_feature = np.zeros(N_ACTION).astype(np.float32)  # fake
            action_label = self.triplet_info[sequence_name]
            action_feature[action_label] = 1.0
            
            # hand skeleton data (MANO fits with zero shape parameters)
            _, right_hand_skeleton_data, right_hand_theta, right_hand_trans = mano_params_to_hand_info(join(hand_pose_dir, "right_hand.pkl"), mano_beta=np.zeros(10), side="right", max_cnt=None, return_pose=True)
            _, left_hand_skeleton_data, left_hand_theta, left_hand_trans = mano_params_to_hand_info(join(hand_pose_dir, "left_hand.pkl"), mano_beta=np.zeros(10), side="left", max_cnt=None, return_pose=True)
            hand_skeleton_data = np.concatenate((right_hand_skeleton_data[:, None, :, :], left_hand_skeleton_data[:, None, :, :]), axis=1)  # (N_frame, 2, 21, 3), [right, left]
            N_frame = hand_skeleton_data.shape[0]
            assert hand_skeleton_data.shape == (N_frame, 2, 21, 3)
            
            # zero_pose_obj: N_obj_point points uniformly sampled on each canonical mesh
            zero_pose_obj_mesh = [load_obj_mesh(p, unit=0.01) for p in obj_model_paths]
            zero_pose_obj = []
            for i in range(len(obj_model_paths)):
                vertices = zero_pose_obj_mesh[i].vertices
                faces = zero_pose_obj_mesh[i].faces
                mesh = o3d.geometry.TriangleMesh(vertices=o3d.utility.Vector3dVector(vertices), triangles=o3d.utility.Vector3iVector(faces))
                pcd = mesh.sample_points_uniformly(number_of_points=N_obj_point)
                zero_pose_obj.append(np.float32(pcd.points))
            zero_pose_obj = np.float32(zero_pose_obj)  # (2, N_obj_point, 3), [tool, target]
            assert zero_pose_obj.shape == (2, N_obj_point, 3)
            
            # obj_pose_data: per-frame 7D poses derived from the 4x4 transforms
            tool_T = np.load(join(obj_pose_dir, tool_objname + ".npy"))  # (N_frame, 4, 4)
            target_T = np.load(join(obj_pose_dir, target_objname + ".npy"))  # (N_frame, 4, 4)
            obj_pose_data = []
            for i in range(N_frame):
                tool_pose = T_to_7Dpose(tool_T[i])[None, :]
                target_pose = T_to_7Dpose(target_T[i])[None, :]
                obj_pose_data.append(np.concatenate((tool_pose, target_pose), axis=0))
            obj_pose_data = np.float32(obj_pose_data)  # (N_frame, 2, 7), [tool, target], [x, y, z, qx, qy, qz, qw]
            assert obj_pose_data.shape == (N_frame, 2, 7)
            
            # obj_point_data: canonical points transformed into world space (R @ p + t)
            tool_point_data = (tool_T[:, :3, :3] @ zero_pose_obj[0].transpose(1, 0) + tool_T[:, :3, 3:]).transpose(0, 2, 1)
            target_point_data = (target_T[:, :3, :3] @ zero_pose_obj[1].transpose(1, 0) + target_T[:, :3, 3:]).transpose(0, 2, 1)
            obj_point_data = np.concatenate((tool_point_data[:, None, :, :], target_point_data[:, None, :, :]), axis=1)
            assert obj_point_data.shape == (N_frame, 2, N_obj_point, 3)
            
            # Sanity-check visualization for every 20th sequence.
            if sequence_idx % 20 == 0:
                visualization_save_dir = join(dirname(self.cfg["save_filepath"]), "visualization")
                os.makedirs(visualization_save_dir, exist_ok=True)
                visualize_HO_poses(join(visualization_save_dir, "{}_{}_{}.gif").format(str(sequence_idx), ACTION_INFO[action_label], sequence_name), hand_skeleton_data, obj_point_data)
            
            # clipping: slice the sequence into many short clips sampled at cfg["frame_rate"]
            for i in range(0, N_frame - 20, stride):  # sliding window over the raw 30 Hz frames, advancing `stride` frames per step (0.1 s at frame_rate=10)
                
                clip_hand_skeleton_data = hand_skeleton_data[i : i + 20 : stride].copy()
                clip_obj_point_data = obj_point_data[i : i + 20 : stride].copy()
                clip_obj_pose_data = obj_pose_data[i : i + 20 : stride].copy()
                clip_zero_pose_obj = zero_pose_obj.copy()
                
                # if flag_cvt_to_relative_pose:  # 放到每个instance各自当前帧的坐标系下，当前帧的信息不变
                #     L = 30 * clip_time_range // stride
                #     anchor_idx = (L // 2) - 1
                #     anchor_frame_idx = i + anchor_idx * stride
                #     # right hand
                #     right_wrist2world = np.zeros((L, 4, 4))
                #     right_wrist2world[:, :3, :3] = batch_rodrigues(torch.from_numpy(right_hand_theta[i : i + 30 * clip_time_range : stride, :3])).reshape(-1, 3, 3).numpy()  # (L, 3, 3)
                #     right_wrist2world[:, :3, 3] = right_hand_trans[i : i + 30 * clip_time_range : stride]
                #     right_wrist2world[:, 3, 3] = 1
                #     anchor_right_pose = clip_hand_skeleton_data[anchor_idx, 0].copy()
                #     clip_hand_skeleton_data[:, 0] = (clip_hand_skeleton_data[:, 0] - right_wrist2world[anchor_idx, :3, 3][None, :]) @ right_wrist2world[anchor_idx, :3, :3]  # in anchor_idx's MANO space
                #     clip_hand_skeleton_data[anchor_idx, 0] = anchor_right_pose
                #     # left hand
                #     left_wrist2world = np.zeros((L, 4, 4))
                #     left_wrist2world[:, :3, :3] = batch_rodrigues(torch.from_numpy(left_hand_theta[i : i + 30 * clip_time_range : stride, :3])).reshape(-1, 3, 3).numpy()  # (L, 3, 3)
                #     left_wrist2world[:, :3, 3] = left_hand_trans[i : i + 30 * clip_time_range : stride]
                #     left_wrist2world[:, 3, 3] = 1
                #     anchor_left_pose = clip_hand_skeleton_data[anchor_idx, 1].copy()
                #     clip_hand_skeleton_data[:, 1] = (clip_hand_skeleton_data[:, 1] - left_wrist2world[anchor_idx, :3, 3][None, :]) @ left_wrist2world[anchor_idx, :3, :3]  # in anchor_idx's MANO space
                #     clip_hand_skeleton_data[anchor_idx, 1] = anchor_left_pose
                #     # tool and target
                #     clip_zero_pose_obj = clip_obj_point_data[anchor_idx]  # NOTE: change zero_pose_obj !!!
                #     anchor_tool_T = tool_T[anchor_frame_idx].copy()  # (4, 4)
                #     anchor_target_T = target_T[anchor_frame_idx].copy()  # (4, 4)
                #     for j in range(L):
                #         if j == anchor_idx:
                #             continue
                #         T = tool_T[i + j * stride] @ np.linalg.inv(anchor_tool_T)
                #         clip_obj_pose_data[j, 0] = T_to_7Dpose(T)
                #         clip_obj_point_data[j, 0] = (clip_obj_point_data[j, 0] - anchor_tool_T[:3, 3]) @ anchor_tool_T[:3, :3]
                #         T = target_T[i + j * stride] @ np.linalg.inv(anchor_target_T)
                #         clip_obj_pose_data[j, 1] = T_to_7Dpose(T)
                #         clip_obj_point_data[j, 1] = (clip_obj_point_data[j, 1] - anchor_target_T[:3, 3]) @ anchor_target_T[:3, :3]
                if flag_cvt_to_relative_pose:  # express everything in the target object's anchor-frame coordinate system
                    # NOTE(review): L = 20 here indexes raw frames, so with
                    # stride > 1 anchor_idx exceeds the subsampled clip length;
                    # this appears to assume frame_rate == 30 -- confirm.
                    L = 20
                    anchor_idx = (L // 2) - 1
                    anchor_frame_idx = i + anchor_idx * stride
                    anchor_target_T = target_T[anchor_frame_idx].copy()  # (4, 4)
                    # p_local = R^T (p_world - t), i.e. inverse of the anchor target pose
                    clip_hand_skeleton_data = (clip_hand_skeleton_data - anchor_target_T[:3, 3]) @ anchor_target_T[:3, :3]
                    clip_obj_point_data = (clip_obj_point_data - anchor_target_T[:3, 3]) @ anchor_target_T[:3, :3]
                    for j in range(L):
                        # T = tool_T[i + j * stride] @ np.linalg.inv(anchor_target_T)
                        T = np.linalg.inv(anchor_target_T) @ tool_T[i + j * stride]
                        clip_obj_pose_data[j, 0] = T_to_7Dpose(T)
                        # T = target_T[i + j * stride] @ np.linalg.inv(anchor_target_T)
                        T = np.linalg.inv(anchor_target_T) @ target_T[i + j * stride]
                        clip_obj_pose_data[j, 1] = T_to_7Dpose(T)
            
                # could be reduced to single hand / single object here
                sequences.append((clip_hand_skeleton_data, clip_obj_point_data, clip_obj_pose_data, clip_zero_pose_obj, sequence_name, obj_names, action_feature))
            
            print("current dataset clip number =", len(sequences))
        
        return sequences

    def __getitem__(self, idx):
        """Return one clip as a list of 8 entries (see class docstring)."""
        if self.cfg["mode"] == "train":
            # Train mode exposes every 3rd clip (matches self.len // 3 in __init__).
            seq_data = list(self.sequences[idx * 3])
        else:
            seq_data = list(self.sequences[idx])
        
        # resample obj points (with replacement, so N_point may exceed the stored count)
        if self.cfg["resample_obj_points"]:
            ids = np.random.choice(np.arange(0, seq_data[3].shape[1]), self.cfg["N_point"], replace=True)
            seq_data[1] = seq_data[1][:, :, ids]
            seq_data[3] = seq_data[3][:, ids]
        
        c_encoding = np.zeros((2, N_CATEGORY)).astype(np.float32)  # (2, N_category)
        if self.cfg["encode_TO"]:
            # NOTE(review): assumes object names are numeric-id strings usable as
            # indices into OBJ_CATEGORY -- confirm against the dataset naming scheme.
            c_tool = OBJ_CATEGORY_DICT[OBJ_CATEGORY[int(seq_data[5][0])]]
            c_encoding[0, c_tool] = 1
            c_target = OBJ_CATEGORY_DICT[OBJ_CATEGORY[int(seq_data[5][1])]]
            c_encoding[1, c_target] = 1
        seq_data.append(c_encoding)
        
        assert len(seq_data) == 8
        # zero out the information not needed for the requested setting
        if self.cfg["info_flag"] == "RT_only":  # keep only the right hand and the tool
            seq_data[0][:, 1] = 0.0
            seq_data[1][:, 1] = 0.0
            seq_data[2][:, 1] = 0.0
            seq_data[2][:, 1, 6] = 1.0  # keep a valid unit quaternion (qw = 1)
            seq_data[3][1] = 0.0
            seq_data[7][1] = 0.0
        elif self.cfg["info_flag"] == "LO_only":  # keep only the left hand and the target
            seq_data[0][:, 0] = 0.0
            seq_data[1][:, 0] = 0.0
            seq_data[2][:, 0] = 0.0
            seq_data[2][:, 0, 6] = 1.0  # keep a valid unit quaternion (qw = 1)
            seq_data[3][0] = 0.0
            seq_data[7][0] = 0.0
        return seq_data

    def __len__(self):
        """Number of exposed clips (train mode: one third of the stored clips)."""
        return self.len
    

def get_sequence_dirs(dates, dataset_dirs, hand_pose_root_dir, obj_pose_root_dirs, obj_model_root_dir, sequence_names=None):
    """Collect the per-sequence directory info needed to build a BiToolDataset.

    Args:
        dates: list of date strings ("YYYYMMDD"); ignored when sequence_names is given.
        dataset_dirs: dataset root directory/directories; a single str is
            accepted and treated as a one-element list.
        hand_pose_root_dir: root of the fitted MANO hand poses.
        obj_pose_root_dirs: root(s) of the object pose results; str or list.
        obj_model_root_dir: directory holding "<objname>_cm.obj" models.
        sequence_names: if not None, use these sequence names instead of
            enumerating the valid videos of `dates`.

    Returns:
        List of dicts with keys "object_pose_dir", "hand_pose_dir",
        "object_model_paths", "object_order"; sequences with incomplete
        hand/object poses are skipped with a printed warning.
    """
    # Backward-compatible generalization: callers may pass a single root path.
    if isinstance(dataset_dirs, str):
        dataset_dirs = [dataset_dirs]
    if isinstance(obj_pose_root_dirs, str):
        obj_pose_root_dirs = [obj_pose_root_dirs]

    overall_sequence_names = []
    if sequence_names is not None:
        overall_sequence_names = sequence_names
    else:
        for date in dates:
            # Find the first dataset root that has this date's valid-video list.
            date_dir = None
            for dd in dataset_dirs:
                if isfile(join(dd, date, date + "_valid_video_id.txt")):
                    date_dir = join(dd, date)
                    break
            if date_dir is None:
                # Previously this fell through and crashed with join(None, ...).
                print("[error] no valid_video_id.txt found for date {}".format(date))
                continue
            overall_sequence_names = overall_sequence_names + load_sequence_names(join(date_dir, date + "_valid_video_id.txt"))

    sequence_dirs = []

    for sequence_name in overall_sequence_names:
        date = sequence_name.split("_")[0]
        # Locate the object pose directory among the candidate roots.
        object_pose_dir = None
        for obj_pose_root_dir in obj_pose_root_dirs:
            candidate = join(obj_pose_root_dir, date, sequence_name, "objpose")
            if isdir(candidate):
                object_pose_dir = candidate
                break
        hand_pose_dir = join(hand_pose_root_dir, date, sequence_name, "mano_wo_contact")
        # Expect exactly two pose files (tool + target).
        if (object_pose_dir is None) or (not isdir(object_pose_dir)) or (len(os.listdir(object_pose_dir)) != 2):
            print("[error] incomplete object poses in {}".format(sequence_name))
            continue
        if (not isfile(join(hand_pose_dir, "right_hand.pkl"))) or (not isfile(join(hand_pose_dir, "left_hand.pkl"))):
            print("[error] incomplete hand poses in {}".format(sequence_name))
            continue

        # Find the dataset root containing this date (needed for the NOKOV metadata).
        dataset_dir = None
        for dd in dataset_dirs:
            if isfile(join(dd, date, date + "_valid_video_id.txt")):
                dataset_dir = dd
                break
        assert dataset_dir is not None
        object_order = get_obj_names(join(dataset_dir, date, sequence_name, "nokov"))  # [tool, target]
        object_model_paths = [join(obj_model_root_dir, x + "_cm.obj") for x in object_order]

        print("successfully add {} !!!".format(sequence_name))
        sequence_dirs.append({
            "object_pose_dir": object_pose_dir,
            "hand_pose_dir": hand_pose_dir,
            "object_model_paths": object_model_paths,
            "object_order": object_order,
        })

    return sequence_dirs


def get_datasets(N_point=50, testset_level=None, cvt_to_relative_pose=False, encode_TO=False, info_flag=""):
    """Build the training set and the (merged) test set.

    Args:
        N_point: points sampled per object model.
        testset_level: None merges the four test levels; 1-4 selects one.
        cvt_to_relative_pose: express poses relative to the target's anchor frame.
        encode_TO: add one-hot tool/target category encodings.
        info_flag: "", "RT_only" or "LO_only" (see BiToolDataset).

    Returns:
        (train_set, test_set, test_set, test_set) -- the test set is reused
        three times to match the caller's expected 4-tuple.
    """
    split_txt_path = "/home/liuyun/HOI-mocap/dataset_statistics/train_test_split_20231112.txt"

    dataset_dirs = ["/data2/HOI-mocap"]
    hand_pose_root_dir = "/data2/hlyang/results/dataset"
    obj_pose_root_dirs = ["/data2/HOI-mocap/HO_poses"]
    obj_model_root_dir = "/data2/HOI-mocap/object_models_final_simplied"

    pose_tag = "relative_to_target" if cvt_to_relative_pose else "absolute"

    def _prepare_split(split):
        # Return (save_filepath, sequence_dirs) for one split. sequence_dirs is
        # None when the preprocessed .npz cache already exists, in which case
        # BiToolDataset loads the cache and never touches the raw directories.
        sequence_names = txt_to_sequence_names(split_txt_path, split)
        save_filepath = "/home/liuyun/HOI-mocap/motion_forecasting/data/{}_{}_50.npz".format(split, pose_tag)
        if isfile(save_filepath):
            return save_filepath, None
        sequence_dirs = get_sequence_dirs(None, dataset_dirs, hand_pose_root_dir, obj_pose_root_dirs, obj_model_root_dir, sequence_names=sequence_names)
        return save_filepath, sequence_dirs

    training_save_filepath, training_sequence_dirs = _prepare_split("train")

    # 1 test set (merged from 4 levels)
    test_save_filepaths = {}
    test_sequence_dirs = {}
    for split in ("test_1", "test_2", "test_3", "test_4"):
        test_save_filepaths[split], test_sequence_dirs[split] = _prepare_split(split)

    # Settings shared by both datasets.
    base_cfg = {
        "frame_rate": 30,
        "clip_time_range": 2 / 3,
        "N_point": N_point,
        "cvt_to_relative_pose": cvt_to_relative_pose,
        "encode_TO": encode_TO,
        "resample_obj_points": True,
        "info_flag": info_flag,
    }

    train_cfg = dict(base_cfg, mode="train", testset_level=None, save_filepath=training_save_filepath, sequence_dirs=training_sequence_dirs)
    train_set = BiToolDataset(train_cfg)

    test_cfg = dict(base_cfg, mode="test", testset_level=testset_level, save_filepath=test_save_filepaths, sequence_dirs=test_sequence_dirs)
    test_set = BiToolDataset(test_cfg)

    return train_set, test_set, test_set, test_set


if __name__ == "__main__":

    ###############################################################################
    dates = ["20230917", "20230927", "20230928", "20230929"]

    # get_sequence_dirs iterates these as lists of candidate roots; passing a
    # bare string (as before) made it iterate over the string's characters.
    dataset_dirs = ["/share/datasets/HOI-mocap"]
    hand_pose_root_dir = "/share/hlyang/results/dataset"
    obj_pose_root_dirs = ["/share/datasets/HOI-mocap/HO_poses"]
    obj_model_root_dir = "/share/datasets/HOI-mocap/object_models_points/50"
    ###############################################################################

    sequence_dirs = get_sequence_dirs(dates, dataset_dirs, hand_pose_root_dir, obj_pose_root_dirs, obj_model_root_dir)

    # BiToolDataset.__init__ reads all of these keys; the previous cfg omitted
    # "mode", "save_filepath", etc. and crashed with KeyError.
    cfg = {
        "mode": "train",
        "testset_level": None,
        "save_filepath": "/tmp/bitool_debug_train.npz",  # throwaway cache for this debug run
        "sequence_dirs": sequence_dirs,
        "frame_rate": 10,
        "clip_time_range": 2,
        "N_point": 50,
        "cvt_to_relative_pose": False,
        "encode_TO": False,
        "resample_obj_points": True,
        "info_flag": "",
    }

    train_bitool = BiToolDataset(cfg)
