import torch
from transformers import BertForSequenceClassification, BertTokenizer, BertModel
from hashlib import shake_128
from ..utils.BookReader import BasicVectorizer


class DPRModel(torch.nn.Module):
    def __init__(self, model_name, question_model=None, text_model=None, num_labels=50):
        super(DPRModel, self).__init__()
        if isinstance(model_name, str):
            self.question_model = BertForSequenceClassification.from_pretrained(model_name, num_labels=num_labels)
            self.text_model = BertForSequenceClassification.from_pretrained(model_name, num_labels=num_labels)
        else:
            if isinstance(question_model, BertForSequenceClassification) and \
                    isinstance(text_model, BertForSequenceClassification):
                self.question_model = question_model
                self.text_model = text_model
            else:
                raise Exception("错误的参数类型")

    def forward(self, batch):
        question_input_ids = batch['question_input_ids']
        question_token_type_ids = batch['question_token_type_ids']
        question_attention_mask = batch['question_attention_mask']
        text_input_ids = batch['text_input_ids']
        text_token_type_ids = batch['text_token_type_ids']
        text_attention_mask = batch['text_attention_mask']

        question_model_output = self.question_model(input_ids=question_input_ids,
                                                    token_type_ids=question_token_type_ids,
                                                    attention_mask=question_attention_mask).logits
        text_model_output = self.text_model(input_ids=text_input_ids,
                                            token_type_ids=text_token_type_ids,
                                            attention_mask=text_attention_mask).logits
        output = torch.matmul(question_model_output, text_model_output.T)

        return output


class DPRVectorizer(BasicVectorizer):
    """Vectorizer backed by a trained :class:`DPRModel` checkpoint.

    Questions go through the question encoder, documents through the text
    encoder; both return numpy embeddings of the classification logits.
    """

    def __init__(self, model_path, basemodel_path, device='cpu'):
        """
        :param model_path: path to the trained DPR model state-dict.
        :param basemodel_path: path/name of the base BERT model, also used
            to load the tokenizer.
        :param device: device for inference, as understood by pytorch.

        ```python
        vectorizer = DPRVectorizer(model_path, tokenizer_path)
        vectorizer.compute_text("你今天吃饭了吗？")
        ```
        """
        super().__init__()
        self.model_path = model_path
        self.model = DPRModel(basemodel_path)
        self.model.load_state_dict(torch.load(model_path, map_location=torch.device(device)))
        self.model.to(device)
        self.tokenizer = BertTokenizer.from_pretrained(basemodel_path)
        self.device = device

    def gethashname(self):
        """Return a short, stable hash identifying this model checkpoint."""
        sha = shake_128()
        sha.update(self.model_path.encode())
        return sha.hexdigest(6)

    def _encode(self, text, encoder):
        """Tokenize ``text`` and return ``encoder``'s logits as a numpy array."""
        encoded = self.tokenizer(text=text, return_tensors="pt", max_length=512,
                                 padding='longest', truncation=True).to(self.device)
        logits = encoder(input_ids=encoded['input_ids'],
                         token_type_ids=encoded['token_type_ids'],
                         attention_mask=encoded['attention_mask']).logits
        return logits.cpu().detach().numpy()

    def compute_text(self, text: str):
        """
        Return the vector representation of a document.
        :param text:
        :return:
        """
        return self._encode(text, self.model.text_model)

    def compute_question(self, question: str):
        """
        Return the vector representation of a question.
        :param question:
        :return:
        """
        # Bug fix: this previously routed questions through the *text*
        # encoder, leaving the trained question encoder unused.
        return self._encode(question, self.model.question_model)


class tinybertVectorizer(BasicVectorizer):
    """
    Uses one shared BERT model for both questions and contexts. Instead of a
    CLS classification head, the encoder's ``pooler_output`` is returned.
    """

    def __init__(self, modelname: str = "zerohell/tinydpr-acc_0.315-bs_307"):
        super().__init__()
        self.modelname = modelname
        self.model = BertModel.from_pretrained(self.modelname)
        self.tokenizer = BertTokenizer.from_pretrained(self.modelname)

    def compute(self, text: str):
        """Embed ``text`` and return the pooled output as a numpy array."""
        device = self.model.device
        tokens = self.tokenizer(text=text, return_tensors="pt", max_length=512,
                                padding='longest', truncation=True).to(device)
        pooled = self.model(input_ids=tokens['input_ids'].to(device),
                            token_type_ids=tokens['token_type_ids'].to(device),
                            attention_mask=tokens['attention_mask'].to(device)
                            ).pooler_output
        return pooled.detach().cpu().numpy()

    def compute_question(self, question: str):
        """Questions and texts share the same encoder."""
        return self.compute(question)

    def compute_text(self, text: str):
        """Questions and texts share the same encoder."""
        return self.compute(text)

    def gethashname(self):
        """Return a short, stable hash identifying the model name."""
        digest = shake_128()
        digest.update(self.modelname.encode())
        return digest.hexdigest(6)
