import math
import torch
import torch.nn as nn
import torch.nn.functional as F

class MultiHeadSelfAttention(nn.Module):
    """Standard multi-head scaled dot-product self-attention.

    Projects the input into per-head query/key/value spaces, computes
    softmax(QK^T / sqrt(head_dim)) V per head, then merges heads through
    a final linear layer.

    Args:
        dim: model (embedding) dimension; must be divisible by ``head_num``.
        head_num: number of attention heads.
        attention_dropout: dropout probability applied to the attention weights.
    """

    def __init__(self, dim, head_num, attention_dropout=0.1):
        super().__init__()
        if dim % head_num != 0:
            raise ValueError(
                f"dim ({dim}) must be divisible by head_num ({head_num})"
            )
        self.dim = dim
        self.head_num = head_num
        self.head_dim = dim // head_num
        self.fcl_q = nn.Linear(dim, dim)
        self.fcl_k = nn.Linear(dim, dim)
        self.fcl_v = nn.Linear(dim, dim)
        self.fcl_out = nn.Linear(dim, dim)
        self.dropout = nn.Dropout(attention_dropout)

    def forward(self, X, attention_mask=None):
        """Apply self-attention.

        Args:
            X: input of shape (batch_size, seq_len, dim).
            attention_mask: optional mask broadcastable to
                (batch_size, head_num, seq_len, seq_len); positions equal
                to 0 are masked out (set to -inf before softmax).

        Returns:
            Tensor of shape (batch_size, seq_len, dim).
        """
        batch, seq_len, _ = X.size()
        # BUG FIX: K and V previously reused fcl_q, leaving fcl_k/fcl_v unused.
        Q = self.fcl_q(X)
        K = self.fcl_k(X)
        V = self.fcl_v(X)  # (batch_size, seq_len, dim)
        # dim = head_num * head_dim
        # (batch, seq, dim) -> (batch, seq, head_num, head_dim) -> (batch, head_num, seq, head_dim)
        Q_state = Q.view(batch, seq_len, self.head_num, self.head_dim).transpose(1, 2)
        K_state = K.view(batch, seq_len, self.head_num, self.head_dim).transpose(1, 2)
        V_state = V.view(batch, seq_len, self.head_num, self.head_dim).transpose(1, 2)
        # Scaled dot-product: (batch, head_num, seq_len, seq_len)
        attention_weight = torch.matmul(
            Q_state, K_state.transpose(-1, -2)
        ) / math.sqrt(self.head_dim)
        if attention_mask is not None:
            # Masked positions get -inf so they contribute 0 after softmax.
            attention_weight = attention_weight.masked_fill(
                attention_mask == 0, float("-inf")
            )
        attention_weight = F.softmax(attention_weight, -1)
        attention_weight = self.dropout(attention_weight)
        output = torch.matmul(attention_weight, V_state)  # (batch, head_num, seq, head_dim)
        # Merge heads back: (batch, seq, head_num * head_dim) == (batch, seq, dim)
        output = output.transpose(1, 2).contiguous()
        output = output.view(batch, seq_len, -1)
        output = self.fcl_out(output)
        return output

# Quick demo: feed a random (batch_size=2, seq_len=16, dim=8) batch through
# a 2-head attention module and show the result and its shape.
demo_input = torch.rand(2, 16, 8)
attn = MultiHeadSelfAttention(8, 2)
demo_output = attn(demo_input)
print(demo_output)
print(demo_output.shape)