from typing import List, Tuple, Optional

import torch
from transformers import BertTokenizer
from transformers.generation import BeamSearchDecoderOnlyOutput

from pymodels.models.fid import FiDModel, get_batch_from_data
from pymodels.utils.Explaintation import LMExplaintation

from pluginUtils import BasicComprehensionPlugin


class ComprehensionPlugin(BasicComprehensionPlugin):
    def __init__(self, model_path, device="cpu"):
        """
        Reader plugin used for machine-reading-comprehension inference.

        :param model_path: Path to the pretrained model directory.
        :param device: Device used at inference time (e.g. "cpu" or "cuda").
        """
        super().__init__()
        self.model_path = model_path
        # NOTE(fix): device was assigned twice in the original __init__; once suffices.
        self.device = device
        self.tokenizer = BertTokenizer.from_pretrained(model_path)
        # The heavy FiD model is loaded lazily via wake() and released via hibernate().
        self.fidmodel = None

    def hibernate(self):
        """Drop the model reference to free memory; call wake() before inferring again."""
        self.fidmodel = None

    def wake(self):
        """(Re)load the FiD model from disk and move it to the configured device."""
        self.fidmodel = FiDModel.from_pretrained(self.model_path)
        self.fidmodel.wrapper()
        self.fidmodel.to(self.device)

    def inference(self, texts: List[str], question: str, explained: bool = False, num_beams: int = 5,
                  do_sample: bool = True, max_length: int = 512) -> Tuple[str, Optional[LMExplaintation]]:
        """
        Answer ``question`` by reading the supporting ``texts``.

        :param texts: Candidate passages the model reads.
        :param question: The question to answer.
        :param explained: Whether to also compute an explanation (per-token and
            whole-sentence probabilities).
        :param num_beams: Beam-search width.
        :param do_sample: Whether to enable sampling during generation.
        :param max_length: Maximum length of the generated answer.
        :return: ``(answer, explanation)``; ``explanation`` is ``None`` when
            ``explained`` is False.
        :raises RuntimeError: If the model has not been loaded via wake().
        """
        if self.fidmodel is None:
            # Fail with a clear message instead of an AttributeError on None.
            raise RuntimeError("Model is not loaded; call wake() before inference().")
        # NOTE: tokenizer truncation length stays at 512 (model input limit),
        # independent of the generation max_length parameter.
        batch = get_batch_from_data(question, texts, self.tokenizer, {
            'max_length': 512, 'padding': 'longest', 'return_tensors': "pt", 'truncation': True
        })
        # The FiD generate() call does not accept token_type_ids.
        batch.pop("token_type_ids")
        batch = {k: v.to(self.device) for k, v in batch.items()}
        outputs: BeamSearchDecoderOnlyOutput = \
            self.fidmodel.generate(**batch, output_scores=explained, return_dict_in_generate=True,
                                   num_beams=num_beams, do_sample=do_sample, max_length=max_length, )
        answer = self.tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0]
        # BertTokenizer decodes with spaces between (Chinese) tokens; strip them.
        answer = answer.replace(" ", "")
        if explained:
            # TODO(review): occasionally raises — presumably when sampling output
            # lacks beam_indices; confirm against the generate() output type.
            transition_scores = self.fidmodel.compute_transition_scores(outputs.sequences,
                                                                        outputs.scores,
                                                                        beam_indices=outputs.beam_indices,
                                                                        normalize_logits=True)
            transition_proba = torch.exp(transition_scores)
            # Skip the leading decoder-start token when pairing tokens with scores.
            input_length = 1
            generated_ids = outputs.sequences[:, input_length:]
            generated_tokens = self.tokenizer.convert_ids_to_tokens(generated_ids[0])
            token_pro = list(zip(generated_tokens, transition_proba[0].tolist()))  # per-token probability
            explantation = LMExplaintation(sentence_pro=outputs.sequences_scores.cpu().exp().item(),
                                           token_pro=token_pro)
            return answer, explantation
        # Fix: always return the declared (str, Optional[LMExplaintation]) tuple;
        # the original returned a bare str here, contradicting its own annotation.
        return answer, None

    def get_introduction(self):
        """Return author/contact metadata for this plugin."""
        return """作者：xzh\n日期：2023年4月9日\n邮箱：kodderopert@163.com"""
