from typing import List, Optional, Tuple, Union

import torch
from transformers.generation import BeamSearchDecoderOnlyOutput
from transformers.models.bert import BertTokenizer

from pluginUtils import BasicComprehensionPlugin
from pymodels.models.rag import RAGModel
from pymodels.utils.Explaintation import LMExplaintation


class ComprehensionPlugin(BasicComprehensionPlugin):
    """Reading-comprehension plugin: answers a question from a given passage
    with a generative RAG model.

    Model weights are loaded lazily — call :meth:`wake` before
    :meth:`inference`, and :meth:`hibernate` to release memory.
    """

    def __init__(self, modelname: str = "zerohell/rag-bart-bleu_error", device: str = "cpu"):
        """
        Reader used for comprehension inference.

        :param modelname: Model path or hub identifier.
        :param device: Device to run inference on (e.g. ``"cpu"``, ``"cuda"``).
        """
        super().__init__()
        self.modelname = modelname
        self.device = device  # fixed: was redundantly assigned twice
        self.tokenizer = BertTokenizer.from_pretrained(self.modelname)
        # Weights are not loaded yet; wake() must run before inference().
        self.ragmodel: Optional[RAGModel] = None

    def hibernate(self):
        """Drop the model reference to free memory; wake() re-loads it."""
        self.ragmodel = None

    def wake(self):
        """Load the model weights and move them onto the configured device."""
        self.ragmodel = RAGModel(self.modelname)
        self.ragmodel.to(self.device)

    def inference(self, texts: List[str], question: str, explained: bool = False, use_p: int = 0) \
            -> Union[str, Tuple[str, LMExplaintation]]:
        """
        Answer ``question`` using the ``use_p``-th passage of ``texts``.

        :param texts: Candidate passages.
        :param question: Question to answer.
        :param explained: If True, also return a probability explanation.
        :param use_p: Index of the passage in ``texts`` used for inference.
        :return: The answer string, or ``(answer, explanation)`` when
            ``explained`` is True. (The original annotation always promised a
            tuple, which did not match the actual return of the plain-string
            branch — annotation corrected, behavior unchanged.)
        :raises RuntimeError: if called before :meth:`wake`.

        NOTE(review): ``do_sample=True`` makes the output non-deterministic.
        """
        if self.ragmodel is None:
            # Previously this surfaced as an opaque AttributeError.
            raise RuntimeError("Model not loaded: call wake() before inference().")
        batch = self.tokenizer(texts[use_p], question, max_length=512, padding='longest',
                               return_tensors="pt", truncation=True)
        input_ids = batch['input_ids']
        attention_mask = batch['attention_mask']
        outputs: BeamSearchDecoderOnlyOutput = \
            self.ragmodel.model.generate(input_ids=input_ids.to(self.device),
                                         attention_mask=attention_mask.to(self.device),
                                         output_scores=explained,
                                         return_dict_in_generate=True,
                                         num_beams=5, do_sample=True, max_length=512, )
        answer = self.tokenizer.batch_decode(outputs.sequences, skip_special_tokens=True)[0]
        # BERT-style tokenizers separate (Chinese) tokens with spaces; strip them.
        answer = answer.replace(" ", "")
        if explained:
            # NOTE(review): the original marked this call "sometimes errors"
            # (todo) — presumably when beam_indices/scores are unavailable;
            # left as-is to preserve behavior, but worth guarding upstream.
            transition_scores = self.ragmodel.model.compute_transition_scores(
                outputs.sequences,
                outputs.scores,
                beam_indices=outputs.beam_indices,
                normalize_logits=True)
            transition_proba = torch.exp(transition_scores)
            input_length = 1  # skip the decoder start token
            generated_ids = outputs.sequences[:, input_length:]
            generated_tokens = self.tokenizer.convert_ids_to_tokens(generated_ids[0])
            # Per-token generation probability, aligned with the decoded tokens.
            token_pro = list(zip(generated_tokens, transition_proba[0].tolist()))
            explanation = LMExplaintation(sentence_pro=outputs.sequences_scores.cpu().exp().item(),
                                          token_pro=token_pro)
            return answer, explanation
        return answer

    def get_introduction(self):
        """Return author/contact information for this plugin (verbatim)."""
        return """作者：xzh\n日期：2023年3月29日\n邮箱：kodderopert@163.com"""
