import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class SelfAttention(nn.Module):
    def __init__(self, dim):
        super().__init__()
        self.dim = dim
        self.fcl_q = nn.Linear(dim, dim)
        self.fcl_k = nn.Linear(dim, dim)
        self.fcl_v = nn.Linear(dim, dim)

    def forward(self, X):
        Q = self.fcl_q(X)
        K = self.fcl_k(X)
        V = self.fcl_v(X)
        atten = F.softmax(
            torch.matmul(Q, K.transpose(-1, -2)) / math.sqrt(self.dim),
            dim = -1
        )
        output = torch.matmul(atten, V)
        return output

def main() -> None:
    """Demo: run self-attention on a random (batch_size, seq_len, dim) batch."""
    X = torch.rand(2, 16, 8)
    self_attention = SelfAttention(8)
    output = self_attention(X)
    print(output)


# Guard so importing this module does not execute the demo.
if __name__ == "__main__":
    main()