import torch
import transformers
from transformers import BartForConditionalGeneration, GenerationMixin


class rEncoder_warpper(torch.nn.Module):
    """Wraps an encoder and fuses per-passage encodings with an RNN (FiD-style).

    The wrapped encoder is run on ``[batch_size * k, seq_len]`` token ids; the
    resulting hidden states are regrouped per example and each example's ``k``
    passage encodings are collapsed by the RNN into a single
    ``[seq_len, d_model]`` representation.

    The owner is expected to set ``self.n_passages`` (k) externally before the
    training-time (2-D input) call, and reads back ``self.encode`` / ``self.rnn``
    when unwrapping.
    """

    def __init__(self, encoder, d_model, rnn=None):
        """encoder: the underlying encoder module; d_model: hidden size;
        rnn: optional pre-built/trained RNN to reuse (a fresh one is created
        when omitted)."""
        super().__init__()
        self.encode = encoder
        if rnn is None:
            self.rnn = torch.nn.RNN(d_model, d_model, batch_first=False)
        else:
            self.rnn = rnn

    def _fuse(self, hidden, batch_size, k, seqlen):
        """Collapse per-passage hidden states into one state per example.

        hidden: ``[batch_size * k, seqlen, d_model]`` ->
        returns ``[batch_size, seqlen, d_model]``.

        Each example's ``[k, seqlen, d_model]`` slab is fed to the RNN; since
        ``batch_first=False`` the passage axis (k) acts as the time axis, so
        the final hidden state h_n (``[1, seqlen, d_model]``) summarizes all
        passages of that example.

        NOTE(review): the original author observed that using the RNN's full
        output (index 0) instead of h_n (index 1) "mixed batches"; h_n is kept
        here, matching the original behavior.
        """
        per_example = hidden.reshape((batch_size, k, seqlen, -1))
        fused = [self.rnn(chunk.squeeze(0))[1]
                 for chunk in torch.split(per_example, 1)]
        return torch.cat(fused, 0)

    def forward(self, input_ids, attention_mask, **kwag):
        # Accepts [batch_size * k, seq_len] (training) or
        # [batch_size, k, seq_len] (decoding); emits last_hidden_state of
        # shape [batch_size, seq_len, d_model] in both cases.
        if input_ids.dim() == 2:  # training path
            batch_size, seqlen = input_ids.shape
            k = self.n_passages  # set externally by the owning model
            batch_size = int(batch_size / k)
            outputs = self.encode(input_ids, attention_mask, **kwag)
            outputs.last_hidden_state = self._fuse(
                outputs.last_hidden_state, batch_size, k, seqlen)
        else:  # decoding path: flatten the passage dimension first
            batch_size, k, seqlen = input_ids.shape
            input_ids = input_ids.reshape(-1, seqlen)
            attention_mask = attention_mask.reshape(-1, seqlen)
            # NOTE(review): the reshaped attention_mask is deliberately NOT
            # forwarded here, matching the original code (FiD drops the mask
            # at generation time) — confirm this is intended before changing.
            outputs = self.encode(input_ids, **kwag)
            outputs.last_hidden_state = self._fuse(
                outputs.last_hidden_state, batch_size, k, seqlen)
        return outputs


class rFiDModel(BartForConditionalGeneration, GenerationMixin):
    """Fusion-in-Decoder style BART.

    Encodes ``n_passages`` per example and fuses them with an RNN (via
    ``rEncoder_warpper``) before decoding. The RNN lives on the model itself
    so it is saved/loaded with the checkpoint, and is swapped into/out of the
    encoder wrapper by ``wrapper*``/``unwarpper``.
    """

    def __init__(self, config):
        super().__init__(config)
        self.config = config
        # Kept at top level so its weights persist in checkpoints; shared with
        # the encoder wrapper while the model is wrapped.
        self.rnn = torch.nn.RNN(self.config.d_model, self.config.d_model, batch_first=False)

    def wrapper(self):
        # Use this during training: wrap the encoder so passage fusion happens
        # inside it. self.rnn is passed through so previously trained/loaded
        # RNN weights are reused (the original built a fresh RNN here, which
        # silently discarded checkpointed RNN weights when re-wrapping).
        print("wrappering")
        self.model.encoder = rEncoder_warpper(self.model.encoder, self.config.d_model, self.rnn)

    def wrapper_inference(self):
        # Use this during inference: wrap the encoder, reusing the trained RNN.
        self.model.encoder = rEncoder_warpper(self.model.encoder, self.config.d_model, self.rnn)

    def unwarpper(self):
        # Undo the wrapping before saving model parameters: copy the wrapper's
        # (possibly trained) RNN weights back onto self.rnn and restore the
        # original encoder.
        self.rnn = torch.nn.RNN(self.config.d_model, self.config.d_model, batch_first=False)
        self.rnn.load_state_dict(self.model.encoder.rnn.state_dict())
        self.model.encoder = self.model.encoder.encode

    def prepare_inputs_for_generation(
            self,
            decoder_input_ids,
            past_key_values=None,
            attention_mask=None,
            decoder_attention_mask=None,
            head_mask=None,
            decoder_head_mask=None,
            cross_attn_head_mask=None,
            use_cache=None,
            encoder_outputs=None,
            **kwargs
    ):
        """Build the per-step decoder inputs used by ``generate``."""
        # cut decoder_input_ids if past_key_values is used
        if past_key_values is not None:
            decoder_input_ids = decoder_input_ids[:, -1:]
        return {
            "input_ids": None,  # encoder_outputs is defined. input_ids not needed
            "encoder_outputs": encoder_outputs,
            "past_key_values": past_key_values,
            "decoder_input_ids": decoder_input_ids,
            # FiD requires dropping the encoder attention mask here (the fused
            # encoder states no longer align with the original token mask).
            "attention_mask": None,
            "decoder_attention_mask": decoder_attention_mask,
            "head_mask": head_mask,
            "decoder_head_mask": decoder_head_mask,
            "cross_attn_head_mask": cross_attn_head_mask,
            "use_cache": use_cache,  # change this to avoid caching (presumably for debugging)
        }

    def forward(self, input_ids=None, labels=None, **kwargs):
        """Accepts input_ids of shape (batch_size, n_passages, seqlen) and
        flattens them for the wrapped encoder.

        Raises:
            Exception: if ``input_ids`` is not 3-D.
        """
        if input_ids is not None:
            shape = input_ids.shape
            if len(shape) != 3:
                raise Exception("len(input_ids.shape) != 3,expected (batch_size, n_passages, seqlen)")
            batch_size, n_passages, seqlen = shape
            self.model.encoder.n_passages = n_passages
            input_ids = input_ids.reshape((-1, seqlen))
            # Guard added: the original unconditionally reshaped labels and
            # crashed with AttributeError when labels was None (e.g. pure
            # inference through forward).
            if labels is not None:
                # NOTE(review): this assumes labels.numel() == batch_size * seqlen,
                # i.e. the label sequence length equals the passage seqlen —
                # confirm against the data pipeline.
                labels = labels.reshape((batch_size, seqlen))
        return super().forward(input_ids=input_ids, labels=labels, **kwargs)

    def generate(self, input_ids, **kwargs):
        # Accepts (batch_size, n_passages, seqlen); records n_passages on the
        # wrapped encoder, then delegates to the standard generation loop.
        self.model.encoder.n_passages = input_ids.shape[1]
        with torch.no_grad():
            res = super().generate(input_ids=input_ids, **kwargs)
        return res