import os
import pickle
import os.path as osp
import torch.utils.data as tordata
import json
from utils import get_msg_mgr


class MultiSubjects_X(tordata.Dataset):
    """Dataset pairing per-frame silhouette pickles with their SMPL pickles.

    Each sample is one sequence: a list of ``[sil_data, smpl_data]`` pairs
    plus its ``[label, cam_id, seq_name, paths]`` metadata. Probe sequences
    (from the partition file's PROBE_SET) are indexed first, in
    ``[0, self.probe_seqs_num)``.
    """

    def __init__(self, data_cfg, training):
        """Build the sequence index and optionally pre-load all data.

        Args:
            data_cfg (dict): expects ``dataset_root`` (with
                ``silhouette_root`` and ``smpl_root``), ``dataset_partition``
                (JSON path), ``cache`` (bool), and ``dataset_name`` /
                ``test_dataset_name``; ``data_in_use`` is optional.
            training (bool): selects TRAIN_SET (True) or TEST_SET (False).
        """
        self.__dataset_parser(data_cfg, training)
        self.cache = data_cfg['cache']
        # Column views over seqs_info for samplers: label, type/cam-id, view/seq.
        self.label_list = [seq_info[0] for seq_info in self.seqs_info]
        self.types_list = [seq_info[1] for seq_info in self.seqs_info]
        self.views_list = [seq_info[2] for seq_info in self.seqs_info]

        self.label_set = sorted(set(self.label_list))
        self.types_set = sorted(set(self.types_list))
        self.views_set = sorted(set(self.views_list))

        # Lazy per-sequence cache slots and label -> sequence-index mapping.
        self.seqs_data = [None] * len(self)
        self.indices_dict = {label: [] for label in self.label_set}
        for i, seq_info in enumerate(self.seqs_info):
            self.indices_dict[seq_info[0]].append(i)
        if self.cache:
            self.__load_all_data()

    def __len__(self):
        """Return the number of sequences."""
        return len(self.seqs_info)

    def __loader__(self, paths):
        """Load all [silhouette, SMPL] pickle pairs for one sequence.

        Args:
            paths: list of ``[sil_path, smpl_path]`` pairs; sorted so frame
                order is deterministic regardless of listing order.

        Returns:
            list of ``[sil_data, smpl_data]`` entries, one per pair.

        Raises:
            ValueError: if a pair's two pickles differ in length or are empty.
        """
        data_list = []
        for sil_pth, smpl_pth in sorted(paths):
            with open(sil_pth, 'rb') as f:
                sil_data = pickle.load(f)
            with open(smpl_pth, 'rb') as f:
                smpl_data = pickle.load(f)

            # Both modalities must describe the same, non-empty frame set.
            if len(sil_data) != len(smpl_data):
                raise ValueError(f"Silhouette and SMPL data must have the same length: {sil_pth}, {smpl_pth}")
            if len(sil_data) == 0 or len(smpl_data) == 0:
                raise ValueError(f"Empty data detected in: {sil_pth}, {smpl_pth}")

            data_list.append([sil_data, smpl_data])
        return data_list

    def __getitem__(self, idx):
        """Return ``(data_list, seq_info)`` for sequence ``idx``.

        When caching is enabled, the first access loads and stores the data;
        later accesses reuse the stored copy.
        """
        if not self.cache:
            data_list = self.__loader__(self.seqs_info[idx][-1])
        elif self.seqs_data[idx] is None:
            # First cached access: load once, keep for subsequent epochs.
            data_list = self.__loader__(self.seqs_info[idx][-1])
            self.seqs_data[idx] = data_list
        else:
            data_list = self.seqs_data[idx]
        return data_list, self.seqs_info[idx]

    def __load_all_data(self):
        """Eagerly populate the cache for every sequence."""
        for idx in range(len(self)):
            self[idx]

    def __load_seqs_into_list(self, dataset_root, smpl_root, lab, cam, seq, seqs_info_list, data_in_use):
        """Collect silhouette/SMPL pickle pairs for one (label, cam, seq).

        Appends ``[lab, cam_id, seq, pairs]`` to ``seqs_info_list`` when at
        least one paired file survives filtering; otherwise only logs a
        debug message. Missing directories are skipped with a debug message.
        """
        msg_mgr = get_msg_mgr()
        seq_path = osp.join(dataset_root, lab, cam, seq)
        smpl_path = osp.join(smpl_root, lab, cam, seq)

        if not (osp.exists(seq_path) and osp.exists(smpl_path)):
            msg_mgr.log_debug(f"Missing path: {seq_path} or {smpl_path}")
            return

        # Pair only files present in both modality folders; a set gives O(1)
        # membership instead of scanning a list per file.
        smpl_files = set(os.listdir(smpl_path))
        seq_dirs = [[osp.join(seq_path, f), osp.join(smpl_path, f)]
                    for f in sorted(os.listdir(seq_path)) if f in smpl_files]

        if data_in_use is not None:
            # data_in_use is a boolean mask over the paired files, in order.
            seq_dirs = [pair for pair, use_bl in zip(seq_dirs, data_in_use) if use_bl]

        if len(seq_dirs) > 0:
            # Camera folders look like "camid<k>_videoid<v>"; fall back to 0
            # when no "camid" prefix is present.
            cam_typ = cam.split("_videoid")[0]
            cam_id = int(cam_typ.replace("camid", "")) if "camid" in cam_typ else 0
            seqs_info_list.append([lab, cam_id, seq, seq_dirs])
        else:
            msg_mgr.log_debug(f"No valid pkl pairs in {lab}/{cam}/{seq}")

    def __dataset_parser(self, data_config, training):
        """Scan the silhouette tree and build ``self.seqs_info``.

        Reads the JSON partition file, keeps only labels that exist on disk,
        walks label/cam/seq directories, and splits sequences into probe and
        gallery lists. Probe sequences are placed first so
        ``self.probe_seqs_num`` indexes them.
        """
        dataset_root = data_config['dataset_root']['silhouette_root']
        smpl_root = data_config['dataset_root']['smpl_root']
        # Value unused here, but the lookup validates the config key exists.
        dataset_name = data_config['dataset_name'] if training else data_config['test_dataset_name']

        # 'data_in_use' is optional; default to None instead of a bare except.
        data_in_use = data_config.get('data_in_use')

        with open(data_config['dataset_partition'], "r", encoding="utf-8") as f:
            partition = json.load(f)

        train_set = partition["TRAIN_SET"]
        test_set = partition["TEST_SET"]
        # Set for O(1) membership tests inside the directory walk below.
        probe_set = set(partition.get("PROBE_SET", []))

        # Keep only partition labels that actually exist on disk.
        label_list = os.listdir(dataset_root)
        train_set = [label for label in train_set if label in label_list]
        test_set = [label for label in test_set if label in label_list]

        label_set = train_set if training else test_set

        seqs_info_list = []
        probe_seqs_info_list = []
        for lab in label_set:
            for cam in sorted(os.listdir(osp.join(dataset_root, lab))):
                for seq in sorted(os.listdir(osp.join(dataset_root, lab, cam))):
                    # PROBE_SET entries are formatted "label-cam-seq".
                    is_probe = f"{lab}-{cam}-{seq}" in probe_set
                    target = probe_seqs_info_list if is_probe else seqs_info_list
                    self.__load_seqs_into_list(dataset_root, smpl_root, lab, cam, seq,
                                               target, data_in_use)

        # Probe sequences first so indices [0, probe_seqs_num) are the probes.
        self.seqs_info = probe_seqs_info_list + seqs_info_list
        self.probe_seqs_num = len(probe_seqs_info_list)
