from torch import nn
import torch
import torch.nn.functional as F
from config.Config import DEVICE

# Score-function selector constants (MUL = multiplicative, ADD = additive attention).
# NOTE(review): not referenced anywhere in this file's visible code — presumably
# consumed by callers elsewhere in the project; confirm before removing.
MUL = 0
ADD = 1

class RnnAttention(nn.Module):
    """Multiplicative (Luong-style "general") attention for an RNN decoder.

    The decoder state is projected by ``Wq``, the encoder outputs by ``Wk``
    (keys) and ``Wv`` (values), and the score is the dot product of ``W(query)``
    with each key: score(s_i, h_j) = (W s_i)^T h_j in the projected space.
    Padded source positions receive zero attention mass.
    """

    def __init__(self, hidden_size, is_bi_encoder: bool):
        """
        Args:
            hidden_size: dimensionality of the decoder state and of the
                projected attention space.
            is_bi_encoder: if True, encoder outputs carry ``2 * hidden_size``
                features per step (forward + backward directions).
        """
        super(RnnAttention, self).__init__()
        self.hidden_size = hidden_size
        self.Wq = nn.Linear(hidden_size, hidden_size)
        # A bidirectional encoder emits 2 * hidden_size features per position.
        if is_bi_encoder:
            self.Wk = nn.Linear(2 * hidden_size, hidden_size)
            self.Wv = nn.Linear(2 * hidden_size, hidden_size)
        else:
            self.Wk = nn.Linear(hidden_size, hidden_size)
            self.Wv = nn.Linear(hidden_size, hidden_size)
        self.W = nn.Linear(hidden_size, hidden_size)
        # Softmax over the source-length dimension of (batch, src_len) scores.
        self.Ws = nn.Softmax(dim=1)

    def forward(self, cur_decoder_output, encoder_outputs, src_lengths):
        """Compute one attention step.

        Args:
            cur_decoder_output: (batch, hidden_size) current decoder state.
            encoder_outputs: (batch, src_len, enc_dim) padded encoder states,
                where enc_dim matches the ``Wk``/``Wv`` input size.
            src_lengths: (batch,) true (pre-padding) source lengths.

        Returns:
            context: (batch, hidden_size) attention-weighted value vector.
            alignments: (batch, src_len) attention distribution per example.
        """
        query = self.Wq(cur_decoder_output)
        keys = self.Wk(encoder_outputs)
        values = self.Wv(encoder_outputs)  # batch * src_len * hidden_size
        scores = self.score(query, keys)  # batch * src_len

        # Build the padding mask on the same device as the activations
        # (rather than a globally-configured DEVICE), and size it from the
        # actual padded width so it also works when the batch is padded
        # wider than max(src_lengths).
        seq_len = encoder_outputs.size(1)
        row = torch.arange(seq_len, device=scores.device)  # [0, 1, ..., seq_len-1]
        col = src_lengths.unsqueeze(-1).to(scores.device)  # batch * 1
        score_mask = torch.lt(row, col)  # batch * src_len, True at real tokens
        # Attention must never land on <pad> positions: -inf -> softmax 0.
        scores.masked_fill_(~score_mask, float('-inf'))

        alignments = self.Ws(scores)  # batch * src_len
        context = torch.bmm(alignments.unsqueeze(1), values)  # batch * 1 * hidden_size
        context = context.squeeze(1)  # batch * hidden_size
        return context, alignments

    def score(self, query, keys):
        """Multiplicative score: (W query) dot key, per source position.

        Args:
            query: (batch, hidden_size) projected decoder state.
            keys: (batch, src_len, hidden_size) projected encoder states.

        Returns:
            (batch, src_len) unnormalized attention scores.
        """
        query = query.unsqueeze(1)  # (batch, 1, hidden_size)
        scores = self.W(query)  # (batch, 1, hidden_size)
        # keys.transpose(1, 2): (batch, hidden_size, src_len)
        scores = torch.bmm(scores, keys.transpose(1, 2))  # (batch, 1, src_len)
        scores = scores.squeeze(1)  # (batch, src_len)
        return scores


class Attention(nn.Module):
    """Dot-product attention between a decoder state and encoder outputs.

    Unlike ``RnnAttention``, encoder and decoder hidden sizes may differ:
    ``Wq`` maps the decoder state into the encoder space for scoring, while
    ``Wv`` maps encoder outputs into the decoder space so the returned
    context matches the decoder's dimensionality. Padded source positions
    receive zero attention mass.
    """

    def __init__(self, encoder_hidden_size, decoder_hidden_size, is_bi_encoder: bool):
        """
        Args:
            encoder_hidden_size: per-direction encoder hidden size.
            decoder_hidden_size: decoder hidden size (context vector size).
            is_bi_encoder: if True, encoder outputs carry
                ``2 * encoder_hidden_size`` features per step.
        """
        super(Attention, self).__init__()
        self.encoder_hidden_size = encoder_hidden_size
        self.decoder_hidden_size = decoder_hidden_size
        self.Wq = nn.Linear(decoder_hidden_size, encoder_hidden_size)
        # A bidirectional encoder emits 2 * encoder_hidden_size features.
        if is_bi_encoder:
            self.Wk = nn.Linear(2 * encoder_hidden_size, encoder_hidden_size)
            self.Wv = nn.Linear(2 * encoder_hidden_size, decoder_hidden_size)
        else:
            self.Wk = nn.Linear(encoder_hidden_size, encoder_hidden_size)
            self.Wv = nn.Linear(encoder_hidden_size, decoder_hidden_size)
        # Softmax over the source-length dimension of (batch, src_len) scores.
        self.Ws = nn.Softmax(dim=1)

    def forward(self, cur_decoder_output, encoder_outputs, src_lengths):
        """Compute one attention step.

        Args:
            cur_decoder_output: (batch, decoder_hidden_size) decoder state.
            encoder_outputs: (batch, src_len, enc_dim) padded encoder states,
                where enc_dim matches the ``Wk``/``Wv`` input size.
            src_lengths: (batch,) true (pre-padding) source lengths.

        Returns:
            context: (batch, decoder_hidden_size) weighted value vector.
            alignments: (batch, src_len) attention distribution per example.
        """
        query = self.Wq(cur_decoder_output)
        keys = self.Wk(encoder_outputs)
        values = self.Wv(encoder_outputs)  # batch * src_len * decoder_hidden_size
        scores = self.score(query, keys)  # batch * src_len

        # Build the padding mask on the same device as the activations
        # (rather than a globally-configured DEVICE), and size it from the
        # actual padded width so it also works when the batch is padded
        # wider than max(src_lengths).
        seq_len = encoder_outputs.size(1)
        row = torch.arange(seq_len, device=scores.device)  # [0, 1, ..., seq_len-1]
        col = src_lengths.unsqueeze(-1).to(scores.device)  # batch * 1
        score_mask = torch.lt(row, col)  # batch * src_len, True at real tokens
        # Attention must never land on <pad> positions: -inf -> softmax 0.
        scores.masked_fill_(~score_mask, float('-inf'))

        alignments = self.Ws(scores)  # batch * src_len
        context = torch.bmm(alignments.unsqueeze(1), values)  # batch * 1 * decoder_hidden_size
        context = context.squeeze(1)  # batch * decoder_hidden_size
        return context, alignments

    def score(self, query, keys):
        """Plain dot-product score between the query and each key.

        Args:
            query: (batch, encoder_hidden_size) projected decoder state.
            keys: (batch, src_len, encoder_hidden_size) projected states.

        Returns:
            (batch, src_len) unnormalized attention scores.
        """
        query = query.unsqueeze(2)  # (batch, encoder_hidden_size, 1)
        scores = torch.bmm(keys, query)  # (batch, src_len, 1)
        scores = scores.squeeze(2)  # (batch, src_len)
        return scores


if __name__ == '__main__':
    # Smoke-test fixtures: a fake batch of encoder states and a final state.
    encoder_output_hidden_states = torch.ones(64, 25, 14)  # batch * max_len * e_hidden
    last_layer_encoder_final_state = torch.ones(64, 14)  # batch * e_hidden
    # query_vector = last_layer_encoder_final_state
    # key_vectors = encoder_output_hidden_states
    # NOTE(review): the commented call below is stale — Attention's constructor
    # now also requires is_bi_encoder, and forward additionally takes
    # src_lengths; update before re-enabling.
    # attn = Attention(14, 14)
    # context, alignments = attn(query_vector, key_vectors)
