from typing import Union, List
from collections import defaultdict

import torch
from transformers import BertTokenizer, BartForConditionalGeneration, BatchEncoding, GenerationMixin


class Encoder_warpper(torch.nn.Module):
    """Encoder wrapper for Fusion-in-Decoder (FiD).

    Encodes each passage independently, then concatenates the encoded
    passages of every sample along the sequence axis so the decoder can
    cross-attend over all passages at once.

    Accepts ``input_ids`` of shape ``[batch_size * k, seq_len]`` (training,
    with ``k = self.n_passages``) or ``[batch_size, k, seq_len]`` (decoding);
    in both cases ``last_hidden_state`` is reshaped to
    ``[batch_size, k * seq_len, hidden]``.
    """

    def __init__(self, encoder):
        super().__init__()
        self.encode = encoder
        # Number of passages per sample. FiDModel overwrites this before
        # each forward/generate call; the default avoids an AttributeError
        # if the wrapper is used standalone with a single passage.
        self.n_passages = 1

    def forward(self, input_ids, attention_mask, **kwag):
        if input_ids.dim() == 2:
            # Training path: rows are already flattened passages.
            batch_size, seqlen = input_ids.shape
            k = self.n_passages
            batch_size = int(batch_size / k)
        else:
            # Decoding path: flatten [batch, k, seq] -> [batch * k, seq]
            # before encoding.
            batch_size, k, seqlen = input_ids.shape
            input_ids = input_ids.reshape(-1, seqlen)
            attention_mask = attention_mask.reshape(-1, seqlen)
        outputs = self.encode(input_ids, attention_mask, **kwag)
        # Fuse the k encoded passages of each sample into one long sequence.
        outputs.last_hidden_state = outputs.last_hidden_state.reshape(
            (batch_size, k * seqlen, -1))
        return outputs


class FiDModel(BartForConditionalGeneration, GenerationMixin):
    """Fusion-in-Decoder (FiD) reader built on BART.

    Call :meth:`wrapper` after loading to replace the BART encoder with
    :class:`Encoder_warpper`, which fuses the encoded passages of each
    sample into a single long sequence for the decoder.

    Example::

        fid = FiDModel.from_pretrained(path)
        fid.wrapper()

        dpr = DPRVectorizer(r"D:\\static\\trained model\\dpr-bert-acc@0.961\\model.param.pt",
                    r"D:\\static\\trained model\\mengzi", device='cuda:0')
        reader = TxtBookReader(verctorizer=dpr)
        reader.readbook("../../bookshelf/《潜艇（精简版）》.txt")

        tokenizer = BertTokenizer.from_pretrained(r"D:\\static\\trained model\\fid-bart-bleu@0.150\\pretrained")

        question = ["潜艇损管的内容是什么？"]
        _, context = reader.search(question, k=3)
        batch = get_batch_from_data(question, context, tokenizer, {
            'max_length': 512, 'padding': 'longest', 'return_tensors': "pt", 'truncation': True
        })
        batch.pop("token_type_ids")
        outputs = fid.generate(**batch,
                               num_beams=5,
                               do_sample=True,
                               max_length=512,
                               min_length=50
                               )
        print(tokenizer.batch_decode(outputs))
    """

    def __init__(self, config):
        super().__init__(config)

    def wrapper(self):
        # Swap in the FiD encoder wrapper. Must be called once after
        # construction/loading and before forward/generate.
        self.model.encoder = Encoder_warpper(self.model.encoder)

    def prepare_inputs_for_generation(
            self,
            decoder_input_ids,
            past_key_values=None,
            attention_mask=None,
            decoder_attention_mask=None,
            head_mask=None,
            decoder_head_mask=None,
            cross_attn_head_mask=None,
            use_cache=None,
            encoder_outputs=None,
            **kwargs
    ):
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            # NOTE(review): FiD drops the encoder attention mask here — the
            # fused [batch, k * seq, hidden] states no longer line up with the
            # original per-passage mask. Original author also marked this as
            # not fully understood; confirm before changing.
            "attention_mask": None,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    def forward(self, input_ids=None, labels=None, **kwargs):
        """Forward pass accepting ``input_ids`` of shape
        ``(batch_size, n_passages, seqlen)``; passages are flattened to
        ``(batch_size * n_passages, seqlen)`` for the wrapped encoder.
        """
        if input_ids is not None:
            shape = input_ids.shape
            if len(shape) != 3:
                raise Exception("len(input_ids.shape) != 3,expected (batch_size, n_passages, seqlen)")
            batch_size, n_passages, seqlen = shape
            # Tell the wrapped encoder how to regroup the flattened rows.
            self.model.encoder.n_passages = n_passages
            input_ids = input_ids.reshape((-1, seqlen))
            # Fix: the original reshaped labels unconditionally and crashed
            # with AttributeError when labels was None (inference w/o loss).
            if labels is not None:
                # NOTE(review): assumes labels flatten to (batch, seqlen) with
                # the passages' seqlen — confirm against the data pipeline.
                labels = labels.reshape((batch_size, seqlen))
        return super().forward(input_ids=input_ids, labels=labels, **kwargs)

    def generate(self, input_ids, attention_mask, **kwargs):
        """Generate from inputs of shape ``(batch_size, n_passages, seqlen)``;
        the wrapped encoder flattens and re-fuses the passages internally.
        """
        self.model.encoder.n_passages = input_ids.shape[1]
        with torch.no_grad():
            res = super().generate(input_ids=input_ids, attention_mask=attention_mask, **kwargs)
        return res


def get_batch_from_data(questions: Union[List[str], str], contenxts: Union[List[List[str]], List[str]], tokenizer: BertTokenizer, tokenizer_kw: dict) \
        -> BatchEncoding:
    """Tokenize question/context pairs into a stacked FiD batch.

    Each question is prepended to every one of its contexts, the resulting
    passages are tokenized per question, and the per-question tensors are
    stacked into shape ``(batch_size, n_passages, seqlen)``.

    :param questions: question(s); a single string is promoted to a list.
    :param contenxts: contexts per question; a flat list of strings is
        treated as the contexts of a single question.
    :param tokenizer: tokenizer object.
    :param tokenizer_kw: keyword arguments forwarded to the tokenizer.
    :return: a ``BatchEncoding`` of stacked tensors.

    NOTE(review): ``torch.stack`` requires every question's encoding to have
    the same seqlen; with per-call ``padding='longest'`` that only holds when
    lengths happen to match across questions — confirm callers pad
    consistently (e.g. ``padding='max_length'``) for batch_size > 1.
    """
    if isinstance(questions, str):
        questions = [questions]
    # Guard the empty case: the original indexed contenxts[0] and raised
    # IndexError when contenxts was [].
    if isinstance(contenxts, list) and contenxts and isinstance(contenxts[0], str):
        contenxts = [contenxts]
    data = defaultdict(list)
    for question, context in zip(questions, contenxts):
        passages = [question + ctx for ctx in context]
        encoded = tokenizer(passages, **tokenizer_kw)
        for key, value in encoded.items():
            data[key].append(value)
    # Stack each field's per-question tensors along a new batch dimension.
    stacked = {key: torch.stack(values) for key, values in data.items()}
    return BatchEncoding(stacked)
