import torch
import torch.nn as nn
import torch.nn.functional as F

from mec.utils.tensor import sequence_mask


class DotAttention(nn.Module):
    """Scaled multiplicative (dot-product) attention for a single query step.

    The memory bank (encoder states, feature dim ``key_size``) is linearly
    projected into the query space (``query_size``), scored against the query
    with a dot product, masked so that no probability mass lands on padding,
    and averaged into a context vector.
    """

    def __init__(self, query_size, key_size):
        super(DotAttention, self).__init__()
        self.query_size = query_size
        self.key_size = key_size
        # Learned scalar scale applied to the raw dot-product scores.
        self.v = nn.Parameter(torch.tensor(1.))
        # Projects memory-bank vectors (key_size) into the query space
        # (query_size) so the dot product in score() is well-defined.
        # NOTE: in_features must be key_size and out_features query_size;
        # the reverse ordering only works when the two sizes coincide.
        self.Wk = nn.Linear(key_size, query_size, bias=False)

    def forward(self, query, memory_bank, memory_lengths):
        """Attend over ``memory_bank`` with a single-step ``query``.

        Input:
          query: [batch, query_size]
          memory_bank: [batch, max_src_len, key_size]
          memory_lengths: [batch] true (unpadded) source lengths

        Returns:
          context: [batch, query_size] attention-weighted sum of projected keys
          alignments: [batch, max_src_len] normalized attention weights
        """
        keys = self.Wk(memory_bank)  # [batch, max_src_len, query_size]
        scores = self.score(query, keys)  # [batch, max_src_len]
        max_src_len = keys.shape[1]
        row = torch.arange(max_src_len, device=query.device)  # positions [0, 1, ..., max_src_len-1]
        col = memory_lengths.unsqueeze(-1).to(device=query.device)  # [batch, 1]
        score_mask = torch.lt(row, col)  # [batch, max_src_len]; True at real tokens
        # Attention must never be placed on <pad> positions.
        scores.masked_fill_(~score_mask, float('-inf'))
        alignments = torch.softmax(scores, dim=1)  # [batch, max_src_len]
        context = torch.bmm(alignments.unsqueeze(1), keys)  # [batch, 1, query_size]
        context = context.squeeze(1)  # [batch, query_size]
        return context, alignments

    def score(self, query, keys):
        """Scaled multiplicative score: v * (h_j . s_i) for every source j.

        Input:
          query: [batch, query_size]
          keys: [batch, max_src_len, query_size] (already projected)

        Returns:
          scores: [batch, max_src_len] (unnormalized)
        """
        query = query.unsqueeze(2)  # [batch, query_size, 1]
        scores = torch.bmm(keys, query)  # [batch, max_src_len, 1]
        scores = scores.squeeze(2)  # [batch, max_src_len]
        scores = self.v * scores
        return scores


class GlobalAttention(nn.Module):
    """Luong-style "general" global attention over a full target sequence.

    The memory bank (feature dim ``key_dim``) is projected into the query
    space (``query_dim``); every target step is scored against every source
    position with a dot product, softmax-normalized, and the resulting
    context is mixed with the query through ``tanh(W [c; h])``.
    """

    def __init__(self, key_dim, query_dim):
        super(GlobalAttention, self).__init__()
        # Projects memory-bank vectors (key_dim) into the query space (query_dim).
        self.Wk = nn.Linear(key_dim, query_dim)
        # Mixes the concatenated [context; query] back down to query_dim.
        self.linear_out = nn.Linear(2 * query_dim, query_dim)

    def score(self, h_tgt, h_src):
        """Batched dot-product scores between targets and sources.

        Input:
          h_tgt: sequence of queries [batch, max_tgt_len, dim]
          h_src: sequence of sources [batch, max_src_len, dim]

        Returns:
          scores: [batch, max_tgt_len, max_src_len] (unnormalized)
        """
        h_s_ = h_src.transpose(1, 2)  # [batch, dim, max_src_len]
        return torch.bmm(h_tgt, h_s_)  # [batch, max_tgt_len, max_src_len]

    def forward(self, tgt, memory_bank, memory_lengths=None):
        """Attend over ``memory_bank`` with one or many query steps.

        Input:
          tgt : [batch, tgt_len, dim] or [batch, dim]
          memory_bank : [batch, src_len, dim]
          memory_lengths : [batch,] true source lengths, or None to skip masking

        Returns:
          context : [batch, tgt_len, dim] or [batch, dim]
          attention : [batch, max_tgt_len, max_src_len] or [batch, max_src_len]
        """
        # One-step input: promote [batch, dim] to a length-1 target sequence
        # and remember to squeeze the outputs back at the end.
        if tgt.dim() == 2:
            one_step = True
            tgt = tgt.unsqueeze(1)  # [batch, 1, dim]
        else:
            one_step = False

        batch, source_l, _ = memory_bank.size()
        _, target_l, _ = tgt.size()

        memory_bank = self.Wk(memory_bank)  # project keys into query space
        align = self.score(tgt, memory_bank)  # [batch, tgt_len, src_len]

        if memory_lengths is not None:
            # Forbid attention on <pad> positions beyond each true length.
            mask = sequence_mask(memory_lengths, max_len=align.size(-1))
            mask = mask.unsqueeze(1)  # broadcast over the target dimension
            align.masked_fill_(~mask, float('-inf'))

        # Softmax-normalize attention weights across source positions.
        align_vectors = F.softmax(align.view(batch * target_l, source_l), -1)
        align_vectors = align_vectors.view(batch, target_l, source_l)  # [batch, tgt_len, src_len]

        # Each context vector c_t is the weighted average over all the
        # (projected) source hidden states:
        # [batch, tgt_len, src_len] x [batch, src_len, dim] -> [batch, tgt_len, dim]
        c = torch.bmm(align_vectors, memory_bank)

        # Concatenate context and query, mix, and squash.
        concat_c = torch.cat([c, tgt], 2)  # [batch, tgt_len, 2*dim]
        attn_h = torch.tanh(self.linear_out(concat_c))  # [batch, tgt_len, dim]

        if one_step:
            attn_h = attn_h.squeeze(1)  # [batch, dim]
            align_vectors = align_vectors.squeeze(1)  # [batch, src_len]

        return attn_h, align_vectors
