# encoding: utf-8
import math
import torch
import torch.nn as nn
import torch.nn.functional as F

# Demo: Tensor.masked_fill(mask, value) writes `value` wherever the boolean
# mask is True. The commented tensors below are example output only --
# torch.randn makes every run different.
a = torch.randn(4, 4)
print(a)
# e.g. tensor([[ 0.9105,  0.1080, -0.2465,  1.8417],
#              [ 0.2210,  0.3447, -2.0660,  0.7162],
#              [-0.0277, -0.0303, -0.4582, -0.6497],
#              [-0.1733,  0.9065,  0.5338,  1.0596]])
# Row i masks its last i+1 entries (True where column j >= 3 - i).
# The mask does not have to match a's shape exactly -- it only needs to be
# broadcastable to it.
mask = torch.tensor([[j >= 3 - i for j in range(4)] for i in range(4)])
b = a.masked_fill(mask, 0)
print(b)  # masked positions are now exactly 0
# e.g. tensor([[ 0.9105,  0.1080, -0.2465,  0.0000],
#              [ 0.2210,  0.3447,  0.0000,  0.0000],
#              [-0.0277,  0.0000,  0.0000,  0.0000],
#              [ 0.0000,  0.0000,  0.0000,  0.0000]])

class AdditiveAttention(nn.Module):
    """Additive (Bahdanau-style) attention.

    Projects queries and keys into a shared hidden space, scores each
    query/key pair with a learned vector W_v, and returns the
    attention-weighted sum of the values.
    """

    def __init__(self, query_size, key_size, hidden_size, drouput=0):
        # NOTE: "drouput" is a typo for "dropout"; the name is kept so
        # existing callers passing it by keyword keep working.
        super().__init__()
        self.W_q = nn.Linear(query_size, hidden_size, bias=False)
        self.W_k = nn.Linear(key_size, hidden_size, bias=False)
        self.W_v = nn.Linear(hidden_size, 1, bias=False)
        self.dropout = nn.Dropout(drouput)

    def forward(self, query, key, value, attn_mask=None):
        """
        Args:
            query: (N, n, d_q)
            key: (N, m, d_k)
            value: (N, m, d_v)
            attn_mask: (N, n, m) boolean; True positions are masked out
        Returns:
            (N, n, d_v) attention-weighted combination of `value`
        """
        # Unsqueeze so query (N, n, 1, h) + key (N, 1, m, h) broadcasts to
        # (N, n, m, h): every query paired with every key.
        query, key = self.W_q(query).unsqueeze(2), self.W_k(key).unsqueeze(1)
        # BUGFIX: squeeze(-1), not squeeze(). A bare squeeze() drops ALL
        # size-1 dims, so N == 1, n == 1, or m == 1 corrupted the (N, n, m)
        # score shape (wrong softmax axis, broken masking/matmul).
        scores = self.W_v(torch.tanh(query + key)).squeeze(-1)  # (N, n, m)
        if attn_mask is not None:
            # -inf scores become exactly 0 after softmax.
            scores = scores.masked_fill(attn_mask, float('-inf'))
        attn_weights = F.softmax(scores, dim=-1)  # (N, n, m)
        return self.dropout(attn_weights) @ value  # (N, n, d_v)


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V."""

    def __init__(self, dropout=0):
        super().__init__()
        self.dropout = nn.Dropout(dropout)

    def forward(self, query, key, value, attn_mask=None):
        """
        Args:
            query: (N, n, d_k)
            key: (N, m, d_k)
            value: (N, m, d_v)
            attn_mask: (N, n, m) boolean; True positions are masked out
        Returns:
            (N, n, d_v) attention-weighted combination of `value`
        """
        assert query.size(2) == key.size(2)
        d_k = query.size(2)
        # Pairwise similarities, scaled by sqrt(d_k) to keep the softmax
        # from saturating as the feature dimension grows.
        scores = torch.matmul(query, key.transpose(1, 2)) / math.sqrt(d_k)
        if attn_mask is not None:
            # -inf scores become exactly 0 after softmax.
            scores = scores.masked_fill(attn_mask, float('-inf'))
        weights = F.softmax(scores, dim=-1)  # (N, n, m)
        return torch.matmul(self.dropout(weights), value)
