from torch import nn
import torch
class SENET(nn.Module):
    """Squeeze-and-excitation gate applied over the time axis of a
    (batch, time, features) tensor.

    Note: ``channel`` must equal the time length T of the input, since the
    pooled per-step statistics are fed through a T -> T bottleneck MLP.
    """

    def __init__(self, channel, reduction=4):
        super().__init__()
        # Collapses the feature dimension to one scalar per time step.
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        # Bottleneck MLP emitting one sigmoid gate per time step.
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid(),
        )

    def forward(self, x):
        batch, steps, _ = x.shape
        # (B, T, C) -> (B, T, 1) -> (B, T): one pooled value per step.
        pooled = self.avg_pool(x).reshape(batch, steps)
        # Per-step gates in (0, 1), broadcast back over the feature axis.
        gate = self.fc(pooled).reshape(batch, steps, 1)
        return x * gate.expand_as(x)


class MHAttention(nn.Module):
    """Multi-head self-attention: learned Q/K/V projections feeding
    nn.MultiheadAttention, followed by an output projection back to
    the caller's input dimensionality.
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3, bias=False):
        # Fixed: the base-class list had a stray trailing comma
        # (`nn.Module,`) -- legal but a typo.
        super().__init__()
        # NOTE(review): `bias` is accepted but never forwarded to the Linear
        # projections below (they use nn.Linear's default bias=True). Wiring
        # it in would silently change existing trained behavior, so it is
        # left unused here and flagged for a deliberate follow-up decision.
        self.W_q = nn.Linear(input_dim, num_hiddens)
        self.W_k = nn.Linear(input_dim, num_hiddens)
        self.W_v = nn.Linear(input_dim, num_hiddens)
        self.attn = nn.MultiheadAttention(
            embed_dim=num_hiddens,
            num_heads=num_head,
            dropout=dropout,
            batch_first=True,
        )
        self.W_o = nn.Linear(num_hiddens, input_dim)

    def forward(self, x):
        # Self-attention: queries, keys and values all derive from x.
        attn_output, _ = self.attn(self.W_q(x), self.W_k(x), self.W_v(x))
        return self.W_o(attn_output)


class AttentionBlock(nn.Module):
    """Transformer-style encoder block: self-attention then a position-wise
    feed-forward network, each wrapped in a residual connection followed by
    LayerNorm (post-norm arrangement).
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3):
        super().__init__()
        self.attn = MHAttention(input_dim, num_hiddens, num_head, dropout)
        self.norm1 = nn.LayerNorm(input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        # Position-wise feed-forward with a 2x hidden expansion.
        self.fc = nn.Sequential(
            nn.Linear(input_dim, input_dim * 2),
            nn.ReLU(),
            nn.Linear(input_dim * 2, input_dim),
        )

    def forward(self, x):
        # Attention sub-layer: residual add, then normalize.
        attended = self.norm1(x + self.attn(x))
        # Feed-forward sub-layer: residual add, then normalize.
        return self.norm2(attended + self.fc(attended))


class MODEL(nn.Module):
    """SE-gated attention encoder over a (batch, window, input_dim) sequence,
    followed by a stacked LSTM; the final hidden states of all LSTM layers
    are concatenated and mapped to a single scalar per sample.
    """

    def __init__(self, input_dim, num_hiddens, num_head, window, dropout=0.3):
        super().__init__()
        lstm_hidden = 30
        lstm_layers = 3
        self.se = SENET(channel=window)
        # BUG FIX: `dropout` was accepted but never used anywhere. Forward it
        # to the attention block; its own default was the same 0.3, so
        # behavior at the default is unchanged while non-default callers now
        # take effect.
        self.attn = nn.Sequential(
            AttentionBlock(input_dim, num_hiddens, num_head, dropout)
        )
        self.lstm = nn.LSTM(
            input_size=input_dim,
            hidden_size=lstm_hidden,
            num_layers=lstm_layers,
            batch_first=True,
        )
        # The head input was hard-coded to 90, which is
        # lstm_hidden * lstm_layers (30 * 3); compute it so the LSTM
        # configuration can change without silently breaking the head.
        self.fc = nn.Sequential(
            nn.Linear(lstm_hidden * lstm_layers, lstm_hidden),
            nn.LeakyReLU(),
            nn.Linear(lstm_hidden, 1),
        )

    def forward(self, x):
        # assumes x is (batch, window, input_dim) -- TODO confirm with callers
        x = self.attn(x) + self.se(x)
        _, (hn, _) = self.lstm(x)
        # hn: (num_layers, batch, hidden) -> (batch, num_layers * hidden)
        hn = hn.transpose(0, 1).contiguous()
        hn = hn.view(hn.size(0), -1)
        return self.fc(hn)


if __name__ == '__main__':
    # Smoke test: build the model and run one forward pass on random data.
    input_dim = 2048
    num_hiddens = 1024
    num_head = 1
    dropout = 0.3
    window = 30

    model = MODEL(input_dim, num_hiddens, num_head, window)
    batch = torch.randn(16, window, input_dim)
    out = model(batch)
    print(out.shape)
