from __future__ import annotations
from multiprocessing import dummy
import torch
import os.path
from typing import Any, List, Union
from typing import TYPE_CHECKING
from transformers import AutoTokenizer
from ner.collate_function import CollateFunction
from ner.entity_replenish import EntityReplenish
from ner.dataset import Dataset
from utils.preprocessor import Preprocessor
from collections import defaultdict
if TYPE_CHECKING:
    from ner.ner_model import NerModel

# Module-level entity replenisher: used after prediction to fill in entities
# the model missed, via dictionary lookup (see Predictor.predict_batch,
# 'isolated' style). NOTE(review): shared across all Predictor instances.
er = EntityReplenish()


class Predictor:

    def __init__(self, model: NerModel, known_labels: List[str]) -> None:
        """
        创建一个预测器, 该预测器可使用 NER 模型, 对输入的字符串进行序列标注

        参数:
            model (NerModel): 待使用的 NER 模型
            dataset (Dataset): 待使用的数据集。标签将在该数据集中统计。
        """
        self.model = model
        self.known_labels = known_labels
        if self.model.pretrained_model_name is not None:
            local_tokenizer_path = 'models/' + self.model.pretrained_model_name.replace('/', '-') + '-tokenizer/'
            if os.path.exists(local_tokenizer_path):
                print(f'从 {local_tokenizer_path} 载入编码器')
                self.bert_tokenizer = AutoTokenizer.from_pretrained(local_tokenizer_path)
            else:
                print(f'本地编码器不存在，从远程下载')
                self.bert_tokenizer = AutoTokenizer.from_pretrained(self.model.pretrained_model_name)
                self.bert_tokenizer.save_pretrained(local_tokenizer_path)
                print(f'编码器保存到 {local_tokenizer_path}')
        else:
            self.bert_tokenizer = None
        self.collate_function = CollateFunction(self.model, self.bert_tokenizer, self.known_labels, False)
        # CPU 和 GPU 的确定
        self._device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
        # self._device = torch.device("cpu")
        print(f'P使用{self._device}')
        self.model = self.model.to(self._device)

    def predict(self, text:str, return_style:str):
        """
        输入一个字符串, 预测该字符串的序列标记结果

        参数:
            text (str): 输入的字符串
            return_label_index_list (bool): 是否返回标签索引？如果是, 则返回一个整数列表；如果否, 则返回一个字符串, 默认值 False
            return_style (str): 返回数据的风格。
                                如果是'int', 则返回标签索引的列表；
                                如果是'label', 则返回标签字符串的列表;
                                如果是'parallel', 则返回原字符与标签平行对应的字符串
                                如果是'isolated', 则将实体抽取出来，单独返回
                                默认为'isolated'
        返回:
            Union[str, List[int]]: 返回字符串, 或整数数组, 其内容为输入字符串的序列标记结果
        """
        return self.predict_batch([text], return_style)

    def predict_batch(self, batch_text:List[str], return_style:str = 'isolated'):
        """
        输入一个批次的字符串数组, 预测该字符串的序列标记结果
        注意，应该自行掌握批次大小，否则可能会超出显存限制而出错

        Args:
            batch_text (List[str]): 输入一个批次的字符串
            return_style (str): 返回数据的风格。
                                如果是'int', 则返回标签索引的列表；
                                如果是'label', 则返回标签字符串的列表;
                                如果是'parallel', 则返回原字符与标签平行对应的字符串
                                如果是'isolated', 则将实体抽取出来，单独返回
                                默认为'isolated'
        返回:
            Union[List[List[str]], List[List[int]]]: 返回字符串列表, 或整数数组列表, 其内容为输入批次字符串的序列标记结果
        """
        batch_tokens = []
        batch_dummy_labels = []
        for text in batch_text:
            sentence_tokens = [c for c in text]
            dummy_labels = [0] * len(sentence_tokens)
            _, sentence_tokens, dummy_labels = Preprocessor.filter_blank_chars(sentence_tokens, dummy_labels)
            _, sentence_tokens, dummy_labels = Preprocessor.filter_illegal_chars(sentence_tokens, dummy_labels)
            batch_tokens.append(sentence_tokens)
            batch_dummy_labels.append(dummy_labels)
        batch_text = [''.join(sentence_tokens) for sentence_tokens in batch_tokens]
        batch_data = [(text, tokens, dummy_labels) for text, tokens, dummy_labels in zip(batch_text, batch_tokens, batch_dummy_labels)]
        # print('开始编码')
        bert_token_ids, bert_token_types, bert_masks, bert_gold_label_ids, batch_strings, word_vec_input_tensors = self.collate_function(batch_data)
        # print('编码完毕')
        if bert_gold_label_ids is None:
            # 说明预训练模型和词向量都不存在，直接返回
            print('预训练模型和词向量都不存在')
            return ''
        # 数据传入 GPU 或 CPU
        # if bert_token_ids is not None:
        bert_token_ids = bert_token_ids.to(self._device)
        bert_token_types = bert_token_types.to(self._device)


        bert_masks = bert_masks.to(self._device)
        bert_gold_label_ids = bert_gold_label_ids.to(self._device)
        # 模型设置为测试状态
        self.model.eval()
        # 前向计算
        out = self.model(
                        batch_strings,
                        word_vec_input_tensors,
                        bert_token_ids,
                        bert_token_types,
                        bert_masks,
                        bert_gold_label_ids,
                        self._device
                        ).to(self._device)
        result = []

        for sentence_tokens, sentence_mask, sentence_out in zip(batch_tokens, bert_masks, out):
            # 去除 mask 外的内容
            sentence_out = sentence_out[sentence_mask == 1]
            # 去除 [CLS] 和 [SEQ] 标签
            sentence_out = sentence_out[1:-1]
            # CRF 输出结果是整数标签，而 Softmax 输出结果是浮点向量，统一为整数标签
            if sentence_out.dtype is torch.float32 or sentence_out.dtype is torch.float64:
                sentence_out = sentence_out.argmax(dim = 1)
            elif sentence_out.dtype is torch.int32 or sentence_out.dtype is torch.int64:
                sentence_out = sentence_out

            # 输出风格
            if return_style == 'int':
                result.append([index for index in sentence_out])
            elif return_style == 'label':
                result.append([self.known_labels[index] for index in sentence_out])
            elif return_style == 'parallel':
                parallel_str = '句子\t标签序号\t标签名称\n'
                for char, label_index in zip(sentence_tokens, sentence_out):
                    parallel_str += f'{char}\t{label_index}\t{self.known_labels[label_index]}\n'
                result.append(parallel_str)
            elif return_style == 'isolated':
                text = ''.join(sentence_tokens)
                entities = defaultdict(list)
                for i, label_index in enumerate(sentence_out):
                    label_name = self.known_labels[label_index]
                    if label_name.startswith('B-'):
                        entity_span_start = i
                        label_name_stem = label_name[2:]
                        for isub in range(i + 1, len(sentence_out)):
                            entity_span_end = isub
                            sub_label_index = sentence_out[isub]
                            sub_label_name = self.known_labels[sub_label_index]
                            if (not sub_label_name.startswith('I-')) or (not sub_label_name[2:] == label_name_stem):
                                break
                        entity = text[entity_span_start:entity_span_end]
                        label_name = label_name_stem
                        # 无重复的
                        if entity not in entities[label_name]:
                            entities[label_name].append(entity)
                # 基于字典对预测缺失的实体进行补全
                entities = er.entity_full_batch(entities, text)
                result.append({
                    'text': text,
                    'entitys': entities
                })
            else:
                raise Exception(f'{return_style} 没有此风格')

        print('推理完毕')
        return result