import torch.nn as nn
import torch, math, sys


# https://github.com/sdoria/SimpleSelfAttention
class SimpleSelfAttention2(nn.Module):
    """Simplified self-attention layer (after sdoria/SimpleSelfAttention).

    Over the flattened spatial positions N of a (B, C, *spatial) input,
    computes ``gamma * (x x^T) conv(x) + x`` — each matmul is O(N * C^2),
    avoiding the O(N^2) cost of standard self-attention.
    """

    def __init__(self, n_in: int, ks=1):
        """n_in: number of input channels; ks: kernel size of the 1-D conv."""
        super().__init__()
        # Channel-mixing convolution (1x1 by default), no bias; padding
        # keeps the flattened length unchanged for odd ks.
        self.conv = nn.Conv1d(n_in, n_in, ks, padding=ks // 2, bias=False)
        # Learnable residual gate; zero-init makes the layer start as an
        # identity mapping.
        self.gamma = nn.Parameter(torch.tensor([0.]))
        self.n_in = n_in

    def forward(self, x):
        """Apply gated self-attention; output shape equals input shape."""
        orig_shape = x.size()
        flat = x.view(orig_shape[0], orig_shape[1], -1)  # (B, C, N)
        mixed = self.conv(flat)  # (B, C, N)
        # Channel Gram matrix: (B, C, N) @ (B, N, C) -> (B, C, C)
        gram = torch.bmm(flat, flat.transpose(1, 2).contiguous())
        attended = torch.bmm(gram, mixed)  # (B, C, C) @ (B, C, N) -> (B, C, N)
        out = self.gamma * attended + flat
        return out.view(orig_shape).contiguous()
