from math import sqrt
import torch
import torch.nn as nn

class Self_attention(nn.Module):
    """Single-head scaled dot-product self-attention.

    Shapes:
        input x : (batch_size, seq_len, input_dim)
        Q, K    : (batch_size, seq_len, dim_k)
        V       : (batch_size, seq_len, dim_v)
        output  : (batch_size, seq_len, dim_v)
    """

    def __init__(self, input_dim, dim_k, dim_v):
        super(Self_attention, self).__init__()
        self.q = nn.Linear(input_dim, dim_k)
        self.k = nn.Linear(input_dim, dim_k)
        self.v = nn.Linear(input_dim, dim_v)
        # 1/sqrt(d_k) score scaling from "Attention Is All You Need".
        self._norm_fact = 1 / sqrt(dim_k)

    def forward(self, x):
        Q = self.q(x)  # (batch, seq, dim_k)
        K = self.k(x)  # (batch, seq, dim_k)
        V = self.v(x)  # (batch, seq, dim_v)

        # BUG FIX: apply the 1/sqrt(d_k) scaling to the raw Q·K^T scores
        # BEFORE the softmax. The original multiplied the softmax output by
        # the factor, which breaks the normalization (attention rows no
        # longer sum to 1) and is not scaled dot-product attention.
        scores = torch.bmm(Q, K.permute(0, 2, 1)) * self._norm_fact
        atten = nn.Softmax(dim=-1)(scores)

        # Weighted sum of values: (batch, seq, seq) x (batch, seq, dim_v).
        output = torch.bmm(atten, V)

        return output

if __name__ == '__main__':
    # Smoke test: push one random batch (batch=4, seq_len=3, input_dim=2)
    # through the attention layer and print the result.
    sample = torch.randn(4, 3, 2)
    attention = Self_attention(2, 4, 5)  # input_dim=2, dim_k=4, dim_v=5
    print(attention(sample))

