from __future__ import annotations
from datetime import datetime
import time
from sympy import Not
import torch
from typing import TYPE_CHECKING, Any, List, Tuple
from colorama import Fore, Style

from word_vec_encoders.char_word_vec_encoder import CharWordVecEncoder
if TYPE_CHECKING:
    from ner.ner_model import NerModel
    from ner.dataset import Dataset

class CollateFunction:
    """Batch collate function for NER samples, for torch.utils.data.DataLoader."""

    def __init__(self, model:NerModel, bert_tokenizer:Any, known_labels:list, show_example_sentence:bool = True) -> None:
        """
        Create a collate-function object, typically passed as ``collate_fn``
        when building a torch.utils.data.DataLoader.

        Args:
            model (NerModel): the model; its ``word_vec_encoder_set`` is used to
                build the extra word-vector feature tensors for each batch.
            bert_tokenizer (Any): a HuggingFace-style tokenizer exposing
                ``batch_encode_plus``, or None when no pretrained LM is used.
            known_labels (list): label vocabulary; must contain the control
                label '控制标签' used for [CLS]/[SEP]/[PAD] positions.
            show_example_sentence (bool): kept for API compatibility; example
                printing is currently disabled.
        """
        self.model = model
        self.bert_tokenizer = bert_tokenizer
        self.known_labels = known_labels
        self.show_example_sentence = show_example_sentence

    def __call__(self, batch_data:List[Tuple[str, List[str], List[int]]]) -> Tuple[Any, ...]:
        """
        Collate one batch of NER samples.

        Args:
            batch_data: list of (sentence string, token list, label-index list)
                triples; labels are assumed to be per character of the string.

        Returns:
            A 6-tuple ``(bert_token_ids, bert_token_types, bert_masks,
            bert_gold_label_ids, batch_strings, word_vec_input_tensors)``.
            ``bert_token_ids`` and ``bert_token_types`` are None when no BERT
            tokenizer was supplied.

        Raises:
            ValueError: if a label sequence exceeds the padded batch length.
        """
        # Split the batch into parallel lists of strings, tokens and label indices.
        batch_strings = [tup[0] for tup in batch_data]
        batch_tokens = [tup[1] for tup in batch_data]
        batch_label_indices = [tup[2] for tup in batch_data]

        # +2 accounts for the [CLS] and [SEP] positions added around each sentence.
        batch_sentence_len = max(len(s) for s in batch_strings) + 2

        # Align the label indices with the encoded token layout:
        # [CLS] + labels + [SEP] + [PAD]... — every control/padding position
        # receives the control label.
        bert_control_tag_index = self.known_labels.index('控制标签')
        batch_encoded_label_indices = []
        for sentence_label_indices in batch_label_indices:
            pad_count = batch_sentence_len - len(sentence_label_indices) - 2
            if pad_count < 0:
                raise ValueError('label sequence longer than padded batch length')
            batch_encoded_label_indices.append(
                [bert_control_tag_index]                       # [CLS]
                + list(sentence_label_indices)                 # sentence body
                + [bert_control_tag_index] * (1 + pad_count)   # [SEP] + [PAD]...
            )
        bert_gold_label_ids = torch.LongTensor(batch_encoded_label_indices)

        # Pretrained language model encoding (optional).
        bert_token_ids = None
        bert_token_types = None
        if self.bert_tokenizer is not None:
            batch_encoded = self.bert_tokenizer.batch_encode_plus(
                batch_tokens,
                is_split_into_words=True,
                padding=True,
                return_tensors='pt'
            )
            bert_token_ids = batch_encoded['input_ids']
            bert_masks = torch.LongTensor(batch_encoded['attention_mask'])
            bert_token_types = batch_encoded['token_type_ids']
        else:
            # Without a pretrained model, fabricate BERT-style masks:
            # 1 over [CLS] + characters + [SEP], 0 over [PAD].
            # NOTE(review): dtype stays float (torch.zeros default) to match
            # the original behavior — confirm downstream expects float masks.
            bert_masks = torch.zeros(len(batch_strings), batch_sentence_len)
            for isentence, sentence in enumerate(batch_strings):
                # [CLS] + len(sentence) characters + [SEP]
                bert_masks[isentence, :len(sentence) + 2] = 1

        # Additional per-character features from the model's word-vector encoders.
        word_vec_input_tensors = self.model.word_vec_encoder_set.collate_batch_tensor(batch_strings)

        return bert_token_ids, bert_token_types, bert_masks, bert_gold_label_ids, batch_strings, word_vec_input_tensors