import torch
import torch.nn as nn
import torch.nn.functional as F


class ScaledDotProductAttention(nn.Module):
    """Scaled dot-product attention: softmax(Q K^T / sqrt(d_k)) V.

    Args:
        d_model: unused; retained for backward-compatible constructor signature.
        d_k: per-head key/query dimension used for the 1/sqrt(d_k) scaling.
    """

    def __init__(self, d_model, d_k):
        super(ScaledDotProductAttention, self).__init__()
        # Use a plain Python float rather than a tensor: the original tensor
        # was never registered as a buffer, so it stayed on CPU and caused a
        # device mismatch once the module was moved to GPU.
        self.scale = float(d_k) ** 0.5

    def forward(self, Q, K, V, mask=None):
        """Compute attention over Q, K, V.

        Args:
            Q, K, V: tensors whose last two dims are (seq_len, d_k); leading
                batch/head dims broadcast through matmul.
            mask: optional tensor broadcastable to the score matrix; positions
                where ``mask == 0`` are excluded from attention.

        Returns:
            Tuple of (attention output, attention weights).
        """
        scores = torch.matmul(Q, K.transpose(-2, -1)) / self.scale

        if mask is not None:
            # Large negative value (not -inf) keeps softmax NaN-free even if
            # an entire row happens to be masked out.
            scores = scores.masked_fill(mask == 0, float('-1e20'))

        attention = F.softmax(scores, dim=-1)
        output = torch.matmul(attention, V)
        return output, attention


class MultiHeadAttention(nn.Module):
    """Multi-head self-attention over inputs of shape (batch, seq_len, d_model).

    Args:
        d_model: model/embedding dimension; must be divisible by ``n_heads``.
        n_heads: number of attention heads.

    Raises:
        ValueError: if ``d_model`` is not divisible by ``n_heads``.
    """

    def __init__(self, d_model, n_heads):
        super(MultiHeadAttention, self).__init__()
        if d_model % n_heads != 0:
            raise ValueError("d_model must be divisible by n_heads")
        self.n_heads = n_heads
        self.d_k = d_model // n_heads
        self.qkv_linear = nn.Linear(d_model, d_model * 3, bias=False)
        self.out_linear = nn.Linear(d_model, d_model)
        # Build the attention submodule once, instead of re-instantiating it
        # on every forward pass as the original code did.
        self.attention = ScaledDotProductAttention(d_model, self.d_k)

    def forward(self, x, mask=None):
        """Apply multi-head self-attention.

        Args:
            x: input of shape (batch, seq_len, d_model).
            mask: optional mask broadcastable to the per-head score matrix of
                shape (batch, n_heads, seq_len, seq_len); 0 entries are masked.

        Returns:
            Tuple of (output of shape (batch, seq_len, d_model),
            attention weights of shape (batch, n_heads, seq_len, seq_len)).
        """
        # Bug fix: the original `N, C = x.shape` assumed a 2-D input and
        # raised ValueError for the (batch, seq, d_model) tensors the rest of
        # the method is written for.
        B, L, C = x.shape

        # (B, L, 3*C) -> (B, L, 3, heads, d_k) -> (3, B, heads, L, d_k)
        qkv = self.qkv_linear(x).reshape(B, L, 3, self.n_heads, self.d_k).permute(2, 0, 3, 1, 4)
        q, k, v = qkv.unbind(0)

        attn_output, attn_weights = self.attention(q, k, v, mask)

        # (B, heads, L, d_k) -> (B, L, heads, d_k) -> merge heads -> (B, L, C)
        attn_output = attn_output.permute(0, 2, 1, 3).reshape(B, L, C)

        return self.out_linear(attn_output), attn_weights


class PositionwiseFeedForward(nn.Module):
    """Position-wise feed-forward network: Linear -> ReLU -> Dropout -> Linear.

    Applied independently at every position; expands ``d_model`` to ``d_ff``
    and projects back to ``d_model``.
    """

    def __init__(self, d_model, d_ff, dropout=0.1):
        super(PositionwiseFeedForward, self).__init__()
        self.w1 = nn.Linear(d_model, d_ff)
        self.w2 = nn.Linear(d_ff, d_model)
        self.dropout = nn.Dropout(dropout)

    def forward(self, x):
        """Return the FFN output; same shape as ``x`` in the last dim."""
        hidden = self.w1(x)
        hidden = F.relu(hidden)
        hidden = self.dropout(hidden)
        return self.w2(hidden)

