import torch
import torch.nn as nn
import math


class MHA(nn.Module):
    """Multi-head self-attention (scaled dot-product attention).

    Projects the input into per-head queries/keys/values, computes
    ``softmax(Q K^T / sqrt(head_dim)) V`` independently per head, then
    merges the heads through a final linear projection.

    Args:
        hidden_dim (int): model width; must be divisible by ``heads``.
        heads (int): number of attention heads.
        dropout (float): dropout probability applied to the attention
            weights (after softmax). Default 0.1.

    Raises:
        ValueError: if ``hidden_dim`` is not divisible by ``heads``.
    """

    def __init__(self, hidden_dim, heads, dropout=0.1):
        super().__init__()

        if hidden_dim % heads != 0:
            # Without this check head_dim is silently floor-truncated and
            # the .view() in forward() fails later with a confusing shape
            # error far from the real cause.
            raise ValueError(
                f"hidden_dim ({hidden_dim}) must be divisible by heads ({heads})"
            )

        self.hidden_dim = hidden_dim
        self.heads = heads
        self.head_dim = hidden_dim // heads

        # One fused projection per role; heads are split out in forward().
        self.queries = nn.Linear(hidden_dim, hidden_dim)
        self.keys = nn.Linear(hidden_dim, hidden_dim)
        self.values = nn.Linear(hidden_dim, hidden_dim)

        self.out_proj = nn.Linear(hidden_dim, hidden_dim)
        self.att_drop = nn.Dropout(dropout)

    def forward(self, x, attention_mask=None):
        """Apply multi-head self-attention to ``x``.

        Args:
            x (Tensor): input of shape ``(batch, seq, hidden_dim)``.
            attention_mask (Tensor, optional): mask broadcastable to
                ``(batch, heads, seq, seq)``; positions where the mask
                is 0 are excluded from attention.

        Returns:
            Tensor of shape ``(batch, seq, hidden_dim)``.
        """
        batch_size, seq, _ = x.size()

        # (b, s, hidden_dim) projections
        q = self.queries(x)
        k = self.keys(x)
        v = self.values(x)

        # Split into heads: (b, s, hidden) -> (b, heads, s, head_dim)
        q = q.view(batch_size, seq, self.heads, self.head_dim).permute(0, 2, 1, 3)
        k = k.view(batch_size, seq, self.heads, self.head_dim).permute(0, 2, 1, 3)
        v = v.view(batch_size, seq, self.heads, self.head_dim).permute(0, 2, 1, 3)

        # (b, heads, s, hd) @ (b, heads, hd, s) -> (b, heads, s, s),
        # scaled by sqrt(head_dim) to keep logits in a stable range.
        att_weight = (q @ k.transpose(-1, -2)) / math.sqrt(self.head_dim)

        if attention_mask is not None:
            # Large negative logit so masked positions get ~0 weight
            # after softmax.
            att_weight = att_weight.masked_fill(attention_mask == 0, -1e9)

        att_weight = torch.softmax(att_weight, dim=-1)
        att_weight = self.att_drop(att_weight)

        # (b, heads, s, s) @ (b, heads, s, hd) -> (b, heads, s, hd)
        out = att_weight @ v

        # Merge heads back: (b, heads, s, hd) -> (b, s, hidden_dim).
        # contiguous() is required before view() after the permute.
        out = out.permute(0, 2, 1, 3).contiguous()
        out = out.view(batch_size, seq, -1)

        return self.out_proj(out)




