import torch
from transformers import BartForConditionalGeneration, BertTokenizer


class RAGModel(torch.nn.Module):
    """BART-based sequence-to-sequence model used for retrieval-augmented generation."""

    def __init__(self, model_name):
        """Load a pretrained BART conditional-generation model by name or local path."""
        super().__init__()
        self.model = BartForConditionalGeneration.from_pretrained(model_name)

    def forward(self, batch):
        """Return the LM training loss for a batch.

        Expects `batch` to contain 'qt_input_ids' (question/text input ids)
        and 'ans_input_ids' (answer ids, used as labels).
        """
        outputs = self.model(
            input_ids=batch['qt_input_ids'],
            labels=batch['ans_input_ids'],
        )
        return outputs.loss

    def generate(self, input_ids, **kwargs):
        """Delegate text generation to the wrapped BART model."""
        return self.model.generate(input_ids, **kwargs)


class RAGInferencer:
    def __init__(self, modelname, device="cpu"):
        """
        Reader used for answer-generation inference.

        :param modelname: model name or local path to load.
        :param device: device to run inference on (e.g. "cpu", "cuda").
        """
        self.device = device
        self.ragmodel = RAGModel(modelname)
        self.ragmodel.to(device)
        # This wrapper is inference-only: disable dropout / switch norm layers
        # to eval behavior so generation is deterministic w.r.t. module modes.
        self.ragmodel.eval()
        self.tokenizer = BertTokenizer.from_pretrained(modelname)

    def inference(self, text, question):
        """
        Generate an answer for a (text, question) pair.

        :param text: context passage (tokenized as the first segment).
        :param question: question string (tokenized as the second segment).
        :return: list of decoded answer strings (output of batch_decode).
        """
        batch = self.tokenizer(text, question, max_length=512, padding='max_length',
                               return_tensors="pt", truncation=True)
        input_ids = batch['input_ids'].to(self.device)
        attention_mask = batch['attention_mask'].to(self.device)
        # no_grad: generation needs no autograd graph; saves memory and time.
        with torch.no_grad():
            output_ids = self.ragmodel.model.generate(
                input_ids,
                attention_mask=attention_mask,
                num_beams=5,
                # NOTE(review): num_beams + do_sample => HF "beam-sample" decoding,
                # which is stochastic; confirm sampling (vs. plain beam search) is intended.
                do_sample=True,
                max_length=512,
            )
        return self.tokenizer.batch_decode(output_ids, skip_special_tokens=True)
