"""Cholec数据集官网：http://camma.u-strasbg.fr/datasets/
"""
from torch.utils.data import Dataset
import torch
import pathlib
import pandas
import cv2
from typing import Tuple

class Cholec80(Dataset):
    """Cholec80 surgical-phase dataset of fixed-length 1-fps frame sequences.

    Each sample is a window of ``seq_len`` consecutive seconds from one video:
    the RGB frames (ImageNet-normalized) plus the per-second phase labels.
    Expects ``dataset_root`` to contain ``phase_annotations/videoNN-phase.txt``
    and ``frames/NN/XXXXXXXX.jpg`` (one JPEG per second).
    """

    # Phase names exactly as they appear in the annotation files -> class id.
    phase_class_map = {
        "Preparation": 0, "CalotTriangleDissection": 1,
        "ClippingCutting": 2, "GallbladderDissection": 3,
        "GallbladderPackaging": 4, "CleaningCoagulation": 5,
        "GallbladderRetraction": 6
    }

    def __init__(self, dataset_root: pathlib.Path, seq_len: int,
                 resize_resolution: Tuple[int, int] = (240, 427)):
        """Pre-load all phase labels and index the 80 videos.

        Args:
            dataset_root: extracted dataset root directory.
            seq_len: number of consecutive seconds (frames) per sample.
            resize_resolution: dsize passed straight to ``cv2.resize``.
                NOTE(review): cv2.resize interprets dsize as (width, height),
                so the default (240, 427) yields 240x427 (WxH) images; the
                original comment order suggests HxW was intended — confirm
                with downstream consumers before changing.
        """
        super().__init__()

        self.phase_anno_path = dataset_root / "phase_annotations"
        self.frame_path = dataset_root / "frames"
        self.seq_len = seq_len
        self.resize_resolution = resize_resolution

        # Pre-read labels and build a prefix-sum index over the videos so a
        # flat dataset index can be mapped to (video, start-second).
        self.phase_targets = []
        self.video_index = []
        for i in range(1, 81):
            anno_filepath = self.phase_anno_path / f"video{i:02d}-phase.txt"
            df = pandas.read_csv(anno_filepath, sep="\t")
            # Annotations are per-frame at 25 fps: keep the first frame of
            # each second and map phase names to class ids.  Operating on the
            # Series directly (instead of assigning into an iloc slice as
            # before) avoids pandas' SettingWithCopyWarning and any
            # copy-vs-view ambiguity.
            targets = (
                df["Phase"].iloc[::25]
                .map(Cholec80.phase_class_map)
                .to_list()
            )
            self.phase_targets.append(torch.LongTensor(targets))
            # Number of valid window starts in this video; clamped at 0 so a
            # video shorter than seq_len cannot make the prefix sums decrease
            # (which would break the index search in __getitem__).
            num_starts = max(0, len(targets) - seq_len + 1)
            prev_total = self.video_index[-1] if self.video_index else 0
            self.video_index.append(prev_total + num_starts)

    def __len__(self):
        # Total number of window start positions across all videos.
        return self.video_index[-1]

    def __getitem__(self, idx):
        """Fetch one DACAT-style sample.

        Returns:
            1. RGB frame sequence, float32, shape (seq_len, C, H, W),
               normalized with ImageNet mean/std (inputs are 0-255).
            2. phase label sequence, shape (seq_len,).
        """
        # idx is a flat index over per-second window starts; locate the
        # owning video by scanning the prefix sums.
        video_pt = 0
        while self.video_index[video_pt] <= idx:
            video_pt += 1
        # Offset (in seconds) of the window start within that video.
        frame_pt = idx - (self.video_index[video_pt - 1] if video_pt > 0 else 0)
        targets = self.phase_targets[video_pt][frame_pt:frame_pt + self.seq_len]

        # Read frames one by one, then stack to (seq_len, C, H, W).
        sequence = []
        for i in range(frame_pt, frame_pt + self.seq_len):
            frame_filepath = self.frame_path / f"{(video_pt+1):02d}" / f"{i:08d}.jpg"
            frame = cv2.imread(frame_filepath.absolute().as_posix())
            if frame is None:
                # cv2.imread signals failure by returning None rather than
                # raising; fail loudly with the offending path.
                raise FileNotFoundError(f"Cannot read frame: {frame_filepath}")
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            frame = cv2.resize(frame, self.resize_resolution, interpolation=cv2.INTER_LINEAR)
            frame = frame.transpose(2, 0, 1)  # HWC -> CHW
            sequence.append(torch.tensor(frame, dtype=torch.float32))

        # ImageNet normalization, scaled to the 0-255 pixel range.
        sequence = torch.stack(sequence)
        mean = torch.tensor((0.485, 0.456, 0.406)).reshape(1, -1, 1, 1)
        std = torch.tensor((0.229, 0.224, 0.225)).reshape(1, -1, 1, 1)
        sequence = (sequence - mean * 255) / (std * 255)

        return sequence, targets


# NOTE(review): a legacy, commented-out Cholec80 implementation (reading
# directly from .mp4 files, with tool annotations, one-hot phase labels and
# time-position pairs) used to live here inside a triple-quoted string
# literal. The string was evaluated and discarded at import time, and the
# code inside referenced undefined names (`np`, `F`), so it could never run
# as-is. Removed as dead code — recover it from version-control history if
# the video-based variant is ever needed again.