import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttentionV2(nn.Module):
    """Single-head scaled dot-product self-attention with optional masking.

    Projects the input with three linear layers to queries, keys and values,
    computes softmax(Q K^T / sqrt(dim)) V, applies dropout to the attention
    weights, and passes the result through a final output projection.
    """

    def __init__(self, dim, dropout_rate=0.1):
        """
        Args:
            dim: Feature dimension of the input and of the Q/K/V projections.
            dropout_rate: Dropout probability applied to the attention weights.
        """
        super().__init__()
        self.dim = dim
        self.fcl_q = nn.Linear(dim, dim)
        self.fcl_k = nn.Linear(dim, dim)
        self.fcl_v = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(dropout_rate)
        self.fcl_output = nn.Linear(dim, dim)

    def forward(self, X, attention_mask=None):
        """
        Args:
            X: Input tensor of shape (batch_size, seq_len, dim).
            attention_mask: Optional tensor broadcastable to
                (batch_size, seq_len, seq_len); positions where it equals 0
                are excluded from attention.

        Returns:
            Tensor of shape (batch_size, seq_len, dim).
        """
        Q = self.fcl_q(X)
        K = self.fcl_k(X)
        V = self.fcl_v(X)
        # Scaled dot-product scores: (batch_size, seq_len, seq_len).
        attention_weight = torch.matmul(Q, K.transpose(-1, -2)) / math.sqrt(self.dim)
        if attention_mask is not None:
            # -inf scores become exactly 0 after softmax, so masked key
            # positions contribute nothing to the weighted sum of V.
            attention_weight = attention_weight.masked_fill(
                attention_mask == 0,
                float("-inf")
            )
        # NOTE: removed a leftover debug print of the raw attention scores
        # that fired on every forward pass.
        attention_weight = F.softmax(attention_weight, dim=-1)
        attention_weight = self.dropout(attention_weight)
        output = torch.matmul(attention_weight, V)
        output = self.fcl_output(output)
        return output

# Demo: run the attention module on random input with a key-padding mask.
X = torch.rand(2, 4, 8)  # shape: (batch_size, seq_len, dim)
# Per-sequence validity flags: 1 = attend to this position, 0 = ignore it.
pad_flags = torch.tensor([[1, 1, 1, 0], [1, 1, 0, 0]])
# Broadcast to (batch, seq_len, seq_len): every query row shares the same
# key mask, so `expand` (a view) is enough — no copy needed.
mask = pad_flags.unsqueeze(dim=1).expand(-1, 4, -1)
print("mask.shape:", mask.shape)
print("mask:", mask)
net = SelfAttentionV2(8)
output = net(X, mask)
print("output:", output)