import torch
import torch.nn as nn
from pytorch_lightning import LightningModule
from torch.nn import CrossEntropyLoss
from transformers import BertModel
from .transformer.models import Decoder, Decoder_late


class PgNetBasedBert(LightningModule):
    """Pointer-generator style answer decoder on top of a shared BERT encoder.

    A single BERT encoder produces representations for both the passage
    (``input_ids``) and the question (``input_ids_q``).  A first transformer
    decoder (``decoder_ques``) decodes the answer against the question
    representation; its output is then fed, together with the projected
    passage encoding, into a second decoder (``decoder``).  The final
    distribution mixes the vocabulary projection of the decoder output with a
    copy distribution scattered from the decoder-encoder attention
    (pointer-generator), and a coverage term penalises repeated attention.
    """

    def __init__(self, config, bert_model):
        """Build the model.

        Args:
            config: hyper-parameter namespace; must provide ``d_model``,
                ``n_layers``, ``d_k``, ``d_v``, ``d_ff``, ``n_heads``,
                ``max_ans_length``, ``tgt_vocab_size``, ``vocab_size``,
                ``dropout``, ``weighted_model`` and ``device``.
            bert_model: a pre-loaded BERT encoder exposing
                ``.config.hidden_size`` and returning an object with
                ``last_hidden_state`` when called.
        """
        super().__init__()
        self.config = config
        self.bert1 = bert_model
        # Extra non-linear projection on top of the BERT passage encoding.
        self.bert1_linear = nn.Sequential(
            nn.Linear(self.bert1.config.hidden_size, self.bert1.config.hidden_size),
            nn.ReLU(True))

        # Map the BERT hidden size to the decoder model dimension
        # (separate projections for the question and the passage paths).
        self.bert2dec_ques = nn.Linear(self.bert1.config.hidden_size, config.d_model)
        self.bert2dec = nn.Linear(self.bert1.config.hidden_size, config.d_model)

        # First-stage decoder: decodes the answer against the question.
        self.decoder_ques = Decoder(config.n_layers, config.d_k, config.d_v, config.d_model, config.d_ff,
                                    config.n_heads,
                                    config.max_ans_length, config.tgt_vocab_size, config.dropout, config.weighted_model)
        # Second-stage decoder: refines against the passage encoding.
        self.decoder = Decoder_late(config.n_layers, config.d_k, config.d_v, config.d_model, config.d_ff,
                                    config.n_heads,
                                    config.dropout, config.weighted_model)

        self.tgt_proj = nn.Linear(config.d_model, config.tgt_vocab_size, bias=False)
        self.sigmoid = nn.Sigmoid()
        # Scalar generation probability p_gen, computed from
        # [first-stage decoder output ; contextual vector].
        self.p = torch.nn.Linear(config.d_model * 2, 1)

        self.loss_fct = CrossEntropyLoss()

    def forward(self, batch):
        """Run one forward pass with teacher forcing.

        Args:
            batch: an object exposing ``input_ids``, ``input_ids_q``,
                ``segment_ids``, ``input_mask``, ``input_mask_q``,
                ``answer_ids`` and ``answer_mask`` (assumed integer id /
                mask tensors — TODO confirm against the data loader).

        Returns:
            final_dist: ``(batch * dec_len, tgt_vocab_size)`` mixed
                generation/copy scores, flattened for ``CrossEntropyLoss``.
            coverage_loss: shape-``(1,)`` coverage penalty.
        """
        input_ids = batch.input_ids
        input_ids_q = batch.input_ids_q
        token_type_ids = batch.segment_ids
        attention_mask = batch.input_mask
        attention_mask_q = batch.input_mask_q
        # Teacher forcing: decoder input drops the final answer token.
        dec_inputs = batch.answer_ids[:, :-1]
        dec_inputs_len = batch.answer_mask.sum(1) - 1

        # Encode the passage.
        sequence_output = self.bert1(input_ids, token_type_ids=token_type_ids,
                                     attention_mask=attention_mask).last_hidden_state
        sequence_output = self.bert1_linear(sequence_output)

        # Encode the question and decode against it first.
        ques = self.bert1(input_ids_q, attention_mask=attention_mask_q).last_hidden_state
        ques = self.bert2dec_ques(ques)
        dec_outputs_ques, dec_self_attns_ques, dec_enc_attns_ques = \
            self.decoder_ques(dec_inputs, dec_inputs_len, input_ids_q, ques, return_attn=True, is_initial=True)

        # Project the passage encoding (hidden_size -> d_model) and decode a
        # second time against it.
        sequence_output = self.bert2dec(sequence_output)
        # dec_outputs: (b, dec_len, d_model); dec_enc_attns: per-layer list of
        # (b, heads, dec_len, enc_len).
        dec_outputs, dec_self_attns, dec_enc_attns = \
            self.decoder(dec_inputs, dec_inputs_len, dec_outputs_ques, input_ids, sequence_output, return_attn=True,
                         is_initial=False)
        dec_logits = self.tgt_proj(dec_outputs)

        # Attention of the last head in the last layer: (b, dec_len, enc_len).
        dec_enc_attn = dec_enc_attns[-1][:, -1, :, :]
        # Contextual vector per decoding step: (b, dec_len, d_model).
        contextual = torch.bmm(dec_enc_attn, sequence_output)

        # Coverage mechanism: running sum of attention over decoding steps,
        # seeded with the attention of step 0.
        coverage = dec_enc_attn[:, 0, :]  # (b, enc_len)
        coverage_loss = torch.zeros([1]).to(self.config.device)

        # Copy distribution: scatter each step's source attention onto the
        # full vocabulary at the positions of the source token ids.
        # NOTE(review): scatter_ overwrites on duplicate token ids rather than
        # accumulating (scatter_add_ would sum) — confirm this is intended.
        attn_values = torch.zeros([dec_enc_attn.size()[0], dec_enc_attn.size()[1], self.config.vocab_size]).to(
            self.config.device)
        index = input_ids  # (b, enc_len) token ids used as scatter targets
        # Step t = 0 (no coverage penalty for the first step).
        attn = dec_enc_attn[:, 0, :]
        attn_value = torch.zeros([attn.size()[0], self.config.vocab_size]).to(self.config.device)
        attn_values[:, 0, :] = attn_value.scatter_(1, index, attn)
        # Steps t >= 1: accumulate coverage loss and coverage, then scatter.
        for i in range(1, dec_enc_attn.size()[1]):
            current_att = dec_enc_attn[:, i, :]
            # Coverage loss: sum of elementwise min(attention, coverage).
            coverage_loss = coverage_loss + torch.sum(torch.min(current_att.reshape(-1, 1), coverage.reshape(-1, 1)), 0)
            coverage = coverage + dec_enc_attn[:, i, :]

            attn = dec_enc_attn[:, i, :]
            attn_value = torch.zeros([attn.size()[0], self.config.vocab_size]).to(self.config.device)
            attn_values[:, i, :] = attn_value.scatter_(1, index, attn)

        # Pointer-generator mix: p_gen from [first-stage decoder output ;
        # contextual vector], shape (b, dec_len, 1).
        # NOTE(review): dec_logits are unnormalized logits while attn_values
        # are attention probabilities; mixing them directly (and feeding the
        # result to CrossEntropyLoss) differs from the standard
        # pointer-generator formulation — confirm this is intended.
        embedding = dec_outputs_ques
        p = self.sigmoid(self.p(torch.cat([embedding, contextual], -1)).squeeze(-1)).unsqueeze(-1)
        final_dist = (1 - p) * dec_logits + p * attn_values  # (b, dec_len, tgt_vocab_size)
        final_dist = final_dist.view(-1, dec_logits.size(-1))

        return final_dist, coverage_loss
