from __future__ import annotations
import torch
from torch.utils.data import DataLoader
from transformers import AutoTokenizer
from typing import TYPE_CHECKING, Tuple
from utils.preprocessor import Preprocessor
from ner.collate_function import CollateFunction
if TYPE_CHECKING:
    from ner.ner_model import NerModel


class Dataset(torch.utils.data.Dataset):
    """NER dataset holding pre-tokenized sentences, their raw strings and per-token label indices."""

    def __init__(self, pretrained_model_name:str, corpus_path:str, corpus_type:str, known_labels:list = None, sentence_max_len:int = 512 - 2) -> None:
        """
        Build a dataset object from a corpus on disk.

        Args:
            pretrained_model_name (str): name of the huggingface pretrained model,
                e.g. 'hfl/chinese-bert-wwm-ext'; pass None to use no pretrained tokenizer.
            corpus_path (str): path of the corpus files.
            corpus_type (str): corpus format; 'ccks', 'peoplesdaily' and 'aiaiyi' are supported.
            known_labels (list, optional): labels already known from another dataset; pass
                None to let the preprocessor discover the label set from the corpus.
            sentence_max_len (int, optional): maximum length of a single sentence,
                default 512 - 2 (leaves room for the two special tokens).

        Raises:
            ValueError: when corpus_type is not one of the supported values.
        """
        super().__init__()
        self.sentence_max_len = sentence_max_len
        self.corpus_type = corpus_type
        self.corpus_path = corpus_path
        print(f'语料：{corpus_type}')
        # Dispatch to the matching Preprocessor reader; all readers share one signature.
        corpus_kind = corpus_type.lower()
        if corpus_kind == 'ccks':
            reader = Preprocessor.read_ccks_corpus
        elif corpus_kind == 'peoplesdaily':
            reader = Preprocessor.read_peoples_daily_corpus
        elif corpus_kind == 'aiaiyi':
            reader = Preprocessor.read_aiaiyi_corpus
        else:
            raise ValueError(f'语料类型{corpus_type}不支持, 仅支持 ccks 或 peoplesdaily 或 aiaiyi')
        corpus_strings, corpus_tokens, corpus_label_indices, known_labels = reader(corpus_path, known_labels, sentence_max_len)
        print(f'有效数据共{len(corpus_tokens)}条')
        self.corpus_strings = corpus_strings
        self.corpus_tokens = corpus_tokens
        self.corpus_label_indices = corpus_label_indices
        self.known_labels = known_labels
        # Bidirectional label <-> index maps; label order in known_labels fixes the indices.
        self.index_label_dict = dict(enumerate(self.known_labels))
        self.label_index_dict = {label: index for index, label in enumerate(self.known_labels)}
        self.pretrained_model_name = pretrained_model_name
        if pretrained_model_name is not None:
            self.bert_tokenizer = AutoTokenizer.from_pretrained(pretrained_model_name)
        else:
            self.bert_tokenizer = None
        self._data_loader = None  # created lazily by get_data_loader()

    @staticmethod
    def load_train_and_test_dataset_from(model_file_name:str) -> Tuple[Dataset, Dataset]:
        """
        Rebuild the train and test datasets from the metadata saved in a model checkpoint.

        Args:
            model_file_name (str): path of the checkpoint file written by torch.save.

        Returns:
            Tuple[Dataset, Dataset]: the (train, test) datasets, re-read from their
            original corpus paths with the original corpus type and max length.
        """
        # Only metadata strings/ints are read here, so always map onto CPU — this keeps
        # the load working on machines without the GPU the checkpoint was saved from.
        checkpoint = torch.load(model_file_name, map_location='cpu')
        print(f'从 {model_file_name} 载入数据集信息')
        pretrained_model_name = checkpoint['pretrained_model_name']
        train_corpus_path = checkpoint['train_corpus_path']
        test_corpus_path = checkpoint['test_corpus_path']
        corpus_type = checkpoint['corpus_type']
        sentence_max_len = checkpoint['sentence_max_len']
        train_dataset = Dataset(pretrained_model_name, train_corpus_path, corpus_type, None, sentence_max_len)
        test_dataset = Dataset(pretrained_model_name, test_corpus_path, corpus_type, None, sentence_max_len)
        return train_dataset, test_dataset

    def __getitem__(self, sentence_index):
        # Tokens and label indices are copied so callers (e.g. a collate function that
        # truncates or pads in place) cannot mutate the stored corpus.
        return self.corpus_strings[sentence_index], self.corpus_tokens[sentence_index].copy(), self.corpus_label_indices[sentence_index].copy()

    def __len__(self):
        return len(self.corpus_tokens)

    def get_data_loader(self, model:NerModel, batch_size:int, num_workers:int) -> DataLoader:
        """
        Return the DataLoader bound to this dataset, creating it on first use.

        NOTE: the loader is cached on the instance, so `batch_size` / `num_workers`
        passed on later calls are ignored.

        Args:
            model (NerModel): model handed to the collate function for batch preparation.
            batch_size (int): number of sentences per batch.
            num_workers (int): data-loading worker count; only <= 1 is supported for now.

        Raises:
            ValueError: when num_workers > 1 (multi-process preprocessing is disabled).

        Returns:
            DataLoader: a shuffling loader that drops the last incomplete batch.
        """
        if num_workers > 1:
            raise ValueError('请暂时不要使用多线程数据处理')
        # Single-process loading: the collate function may print example batches.
        show_example = True
        if self._data_loader is None:
            self._data_loader = DataLoader(
                dataset = self,
                batch_size = batch_size,
                collate_fn = CollateFunction(model, self.bert_tokenizer, self.known_labels, show_example),
                shuffle = True,
                drop_last = True,
                num_workers = num_workers,
            )
        return self._data_loader

    def get_token_index(self, token:str) -> int:
        """
        Look up a token's id in the pretrained model's vocabulary.

        Args:
            token (str): the input token.

        Returns:
            int: the token's id in the pretrained vocabulary.
        """
        encoded = self.bert_tokenizer.encode_plus([token], is_split_into_words=True)
        # input_ids is [CLS-id, token-id, SEP-id]; index 1 is the token itself.
        # (The previous `encoded[1]` indexed the BatchEncoding — i.e. batch item 1,
        # which does not exist for a single sentence — instead of the id list.)
        return encoded['input_ids'][1]

    def decode(self, sentence_tensor:torch.Tensor) -> str:
        """
        Decode a torch.Tensor sentence back to a plain string using the
        pretrained model's own tokenizer.

        Args:
            sentence_tensor (torch.Tensor): sentence as a sequence of token ids.

        Returns:
            str: the decoded plain string.
        """
        return self.bert_tokenizer.decode(sentence_tensor)

    def index_to_label(self, index:int) -> str:
        """
        Map a label index to its label string.

        Args:
            index (int): index of the label.

        Returns:
            str: the label string.
        """
        return self.index_label_dict[index]

    def label_to_index(self, label:str) -> int:
        """
        Map a label string to its label index.

        Args:
            label (str): the label string.

        Returns:
            int: index of the label.
        """
        return self.label_index_dict[label]
