from torch.utils.data import Dataset
import pathlib
import os
import json
import pickle
from typing import List, Dict
from torchvision.transforms import transforms
from PIL import Image


def load_label_lists(dataset_root: pathlib.Path) -> Dict:
    """Build (or load from cache) the per-video label lists for CholecT50.

    Maps ``video_id -> [(video_id, frame_id, instrument_id, verb_id,
    target_id, triplet_id, phase_id), ...]`` sorted by frame id. Frames
    whose triplet id is ``-1`` (no triplet annotated) are skipped. The
    result is pickled next to the dataset so later calls skip the JSON
    parsing entirely.

    Args:
        dataset_root (pathlib.Path): root directory of the CholecT50
            dataset; must contain a ``labels`` sub-directory of JSON files.

    Returns:
        Dict: mapping from video id to its label tuple list.
    """
    dataset_root = dataset_root.expanduser().resolve()
    cache_path = dataset_root / "normalcholec50_cache.pkl"
    # Fast path: a cache was generated by a previous run.
    if cache_path.exists():
        with open(cache_path, "rb") as cache_file:
            return pickle.load(cache_file)

    label_lists = {}
    # sorted() makes the scan order deterministic (os.listdir order is
    # arbitrary); the resulting dict content is keyed by video id and is
    # therefore unaffected.
    for label_filepath in sorted((dataset_root / "labels").iterdir()):
        with open(label_filepath, "r", encoding="utf-8") as label_file:
            raw_data = json.load(label_file)
        video_id: int = raw_data['video']
        # Convert frame keys to int and flatten the first annotation of
        # each frame into one tuple: (frame_id, *annotation_fields).
        # NOTE(review): only annos[0] is kept — any additional per-frame
        # annotations are dropped; confirm this matches the label format.
        annotations = [
            (int(frame), *annos[0])
            for frame, annos in raw_data['annotations'].items()
        ]
        annotations.sort(key=lambda x: x[0])
        # Output layout: (video_id, frame_id, instrument_id, verb_id,
        # target_id, triplet_id, phase_id); frame_anno[1] is the triplet
        # id, -1 meaning "no triplet in this frame".
        label_lists[video_id] = [(
            video_id, frame_anno[0], frame_anno[2], frame_anno[8], frame_anno[9], frame_anno[1], frame_anno[15]
        ) for frame_anno in annotations if frame_anno[1] != -1]

    with open(cache_path, "wb") as cache_file:
        pickle.dump(label_lists, cache_file)

    return label_lists



class NormalCholec50(Dataset):
    """CholecT50 frame dataset.

    Each item is ``(image, instrument_id, verb_id, target_id, triplet_id)``
    for one annotated video frame, with frames drawn from the requested
    video ids. Labels come from :func:`load_label_lists`; images are read
    from ``<dataset_root>/videos/VIDxx/ffffff.png``.
    """

    def __init__(
        self,
        dataset_root: pathlib.Path,
        transforms: transforms.Compose,
        video_ids: List[int]
    ):
        """Index the annotated frames of the selected videos.

        Args:
            dataset_root (pathlib.Path): root directory of the CholecT50
                dataset (contains ``videos`` and ``labels``).
            transforms (transforms.Compose): transform pipeline applied to
                each loaded PIL image.
            video_ids (List[int]): ids of the videos to include.

        Raises:
            NotADirectoryError: if ``dataset_root`` does not exist or is
                not a directory.
        """
        dataset_root = dataset_root.expanduser().resolve()
        # Explicit check instead of `assert`: asserts are stripped when
        # Python runs with -O, silently skipping the validation.
        if not dataset_root.is_dir():
            raise NotADirectoryError(
                f"dataset root does not exist or is not a directory: {dataset_root}"
            )
        self.video_path = dataset_root / "videos"

        # Flatten the per-video label lists of the selected videos into a
        # single indexable list.
        label_lists = load_label_lists(dataset_root)
        self.label_list = []
        for video_id in video_ids:
            self.label_list += label_lists[video_id]

        self.transforms = transforms

    def __getitem__(self, index):
        """Return ``(img, instrument_id, verb_id, target_id, triplet_id)`` for frame ``index``.

        ``phase_id`` is present in the label tuple but intentionally not
        returned.
        """
        video_id, frame_id, instrument_id, verb_id, target_id, triplet_id, phase_id = self.label_list[index]
        # Frame files are zero-padded: videos/VID01/000123.png
        img_filepath = self.video_path / f"VID{video_id:02d}" / f"{frame_id:06d}.png"
        # Image.open accepts path-like objects directly; no as_posix() needed.
        img = Image.open(img_filepath).convert("RGB")
        img = self.transforms(img)

        return img, instrument_id, verb_id, target_id, triplet_id

    def __len__(self):
        """Return the number of annotated frames across the selected videos."""
        return len(self.label_list)
