import math

import torch
import torch.nn as nn

import torch.nn.functional as F

class selfAttention(nn.Module):
    def __init__(self, hidden_dim, dropout = 0.1):
        super(selfAttention, self).__init__()

        self.hidden_dim = hidden_dim

        self.queries = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.keys = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.values = nn.Linear(self.hidden_dim, self.hidden_dim)

        self.att_dropout = nn.Dropout(p=dropout)

        self.out = nn.Linear(self.hidden_dim, self.hidden_dim)

    def forward(self, X, attention_mask = None):
        # X: batch_size, seq, dim
        Q = self.queries(X)
        K = self.keys(X)
        V = self.values(X)

        att_weight = Q @ K.transpose(-1, -2) / math.sqrt(self.hidden_dim)


        if attention_mask is not None:
            att_weight = att_weight.masked_fill(attention_mask == 0, float("-1e9"))


        att_weight = torch.softmax(att_weight, dim=-1)


        att_weight = self.att_dropout(att_weight)

        output = att_weight @ V

        output = self.out(output)

        return output


if __name__ == "__main__":
    # Demo: batch of 3 sequences, length 4, hidden_dim 2.
    # Guarded so importing this module has no side effects.
    X = torch.rand(3, 4, 2)

    # Key-padding mask per sequence: 1 = real token, 0 = padding.
    b = torch.tensor(
        [
            [1, 1, 1, 0],
            [1, 1, 0, 0],
            [1, 0, 0, 0],
        ]
    )

    print(b.shape)
    # Expand to (batch, seq_q, seq_k): every query row shares the same
    # key mask, so padded keys are never attended to.
    mask = b.unsqueeze(dim=1).repeat(1, 4, 1)

    net = selfAttention(2)
    print(net(X, mask).shape)