import torch
import math
import torch.nn as nn

class MHA(nn.Module):
    """Multi-head self-attention (scaled dot-product) layer.

    Projects the input into per-head query/key/value spaces, computes
    scaled dot-product attention independently per head, and merges the
    heads back through a final output projection.
    """

    def __init__(self, heads, hidden_dim, dropout=0.1):
        """
        Args:
            heads: number of attention heads; must divide ``hidden_dim``.
            hidden_dim: model (embedding) dimension of input and output.
            dropout: dropout probability applied to the attention weights.

        Raises:
            ValueError: if ``hidden_dim`` is not divisible by ``heads``.
        """
        super().__init__()

        if hidden_dim % heads != 0:
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by heads ({heads})"
            )

        self.heads = heads
        self.hidden_dim = hidden_dim
        self.head_dim = hidden_dim // heads

        # One combined projection per Q/K/V for all heads; the result is
        # split into heads inside forward().
        self.queries = nn.Linear(hidden_dim, hidden_dim)
        self.keys = nn.Linear(hidden_dim, hidden_dim)
        self.values = nn.Linear(hidden_dim, hidden_dim)

        self.out_proj = nn.Linear(hidden_dim, hidden_dim)

        self.att_drop = nn.Dropout(dropout)

    def forward(self, X, mask=None):
        """Apply multi-head self-attention to ``X``.

        Args:
            X: input tensor of shape (batch, seq, hidden_dim).
            mask: optional tensor broadcastable to (batch, heads, seq, seq);
                positions where ``mask == 0`` are excluded from attention.

        Returns:
            Tensor of shape (batch, seq, hidden_dim).
        """
        batch_size, seq, _ = X.size()

        q = self.queries(X)
        k = self.keys(X)
        v = self.values(X)

        # (batch, seq, hidden) -> (batch, heads, seq, head_dim)
        q = q.view(batch_size, seq, self.heads, self.head_dim).transpose(1, 2)
        k = k.view(batch_size, seq, self.heads, self.head_dim).transpose(1, 2)
        v = v.view(batch_size, seq, self.heads, self.head_dim).transpose(1, 2)

        # (b, heads, s, hd) @ (b, heads, hd, s) -> (b, heads, s, s)
        att_score = (q @ k.transpose(-2, -1)) / math.sqrt(self.head_dim)

        if mask is not None:
            # Large negative fill drives masked positions to ~0 after softmax.
            att_score = att_score.masked_fill(mask == 0, -1e9)

        # FIX: dropout belongs on the attention *weights* (post-softmax), not
        # the raw scores. Zeroing pre-softmax scores does not remove those
        # positions — softmax turns the zeros into non-trivial probabilities,
        # distorting the attention distribution instead of regularizing it.
        att_weights = torch.softmax(att_score, dim=-1)
        att_weights = self.att_drop(att_weights)

        # (b, heads, s, s) @ (b, heads, s, hd) -> (b, heads, s, hd)
        out = att_weights @ v

        # Merge heads: (b, heads, s, hd) -> (b, s, heads*hd).
        out = out.transpose(1, 2).contiguous().view(
            batch_size, seq, self.heads * self.head_dim
        )

        return self.out_proj(out)






