import torch
import torch.nn as nn
import torch.nn.functional as F

from typing import Tuple

class SelfAttentionModule(nn.Module):
    """Self-attention where Query, Key and Value all derive from one input M.

    Unlike canonical scaled dot-product attention, the query is produced by an
    element-wise (Hadamard) product of the input with a learned weight matrix,
    rather than a linear projection, and the key/value are the raw input.
    """

    def __init__(self, t_dim: int, d_dim: int, dropout_rate: float = 0.) -> None:
        """Args:
            t_dim: row dimension of the learned weight matrix. Must be
                broadcast-compatible with the input's H dimension.
            d_dim: column dimension of the learned weight matrix. Must be
                broadcast-compatible with the input's W dimension.
            dropout_rate: dropout probability applied to the attention map.
        """
        super().__init__()
        self.dropout = nn.Dropout(p=dropout_rate)
        # nn.Parameter defaults to requires_grad=True, so it need not be passed.
        self.weights = nn.Parameter(torch.empty(t_dim, d_dim))
        nn.init.xavier_uniform_(self.weights)  # initialize weights

    def forward(self, M: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        '''Args:
        Input:
            M: 3D tensor - matrix to compute self-attention (BxHxW)
        Output:
            result: 3D tensor - result matrix (BxHxW)
            attention: 3D tensor - attention matrix (BxHxH)
        '''
        # self attention: Query matrix = Key matrix = Value matrix
        # Query: element-wise modulation of the input by the learned weights
        # (broadcasts (t_dim, d_dim) against (b, h, w)).
        q = torch.mul(M, self.weights) # (b, h, w)
        k = M.permute(0, 2, 1) # (b, w, h)
        v = M # (b, h, w)
        # NOTE(review): no 1/sqrt(d) scaling before softmax, unlike standard
        # scaled dot-product attention — confirm this is intentional.
        attention = torch.bmm(q, k) # (b, h, h)
        attention = F.softmax(attention, dim=-1)
        attention = self.dropout(attention)
        output = attention.bmm(v) # (b, h, w)
        return output, attention