import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from torch.autograd import Variable


class SelfAttentionDot(nn.Module):
    """Dot-product self-attention pooling over a sequence of hidden states.

    Each timestep's hidden vector is projected to an attention space,
    scored against a learned context vector, and the softmax-normalized
    scores are used to take a weighted average of the original hidden
    states — reducing (batch, n, hidden_dim) to (batch, hidden_dim).
    """

    def __init__(self, hidden_dim, attention_dim):
        """
        :param hidden_dim: width of each input hidden state
        :param attention_dim: width of the projected attention space
        """
        super(SelfAttentionDot, self).__init__()

        # Kept as a one-layer Sequential (rather than a bare Linear) so
        # existing checkpoints with 'map_2_u.0.weight' keys still load.
        self.map_2_u = nn.Sequential(
            nn.Linear(hidden_dim, attention_dim, bias=False),
        )
        # Learned context vector u; scores are <u_i, u> per timestep.
        self.context_u = nn.Parameter(torch.FloatTensor(attention_dim, 1))
        nn.init.uniform_(self.context_u)

    def forward(self, hidden_state):
        """
        :param hidden_state: batch_size * n * hidden_dim
        :return: batch_size * hidden_dim
        """
        u_mat = self.map_2_u(hidden_state)  # batch_size * n * attention_dim
        # Broadcasting matmul against (attention_dim, 1) replaces the old
        # torch.stack([context_u] * batch) + bmm, which allocated one
        # redundant copy of context_u per batch element.
        alignment = torch.matmul(u_mat, self.context_u)  # batch_size * n * 1
        alignment = F.softmax(alignment, dim=1)  # normalize over the n axis
        # Weighted sum of the original hidden states: (b,1,n) @ (b,n,d).
        result = torch.bmm(alignment.transpose(1, 2), hidden_state)
        return result.squeeze(1)


if __name__ == '__main__':
    # Smoke test: pool a batch of 30 hidden states of width 128 down to
    # one 128-dim vector per batch element.
    # torch.autograd.Variable is deprecated (a no-op since PyTorch 0.4);
    # plain tensors carry autograd state directly.
    h = torch.rand(1, 30, 128)

    self_attn = SelfAttentionDot(128, 128)
    r = self_attn(h)
    print(r.shape)