import torch
from torch import nn


class SENET(nn.Module):
    """Squeeze-and-Excitation gate applied along dim 1 of a 3-D input.

    For an input of shape (b, t, d), each of the t channels is squeezed
    to a scalar by global average pooling, passed through a bottleneck
    MLP ending in a sigmoid to produce a per-channel weight in (0, 1),
    and the input is rescaled by those weights.

    Args:
        channel: size of dim 1 of the input (number of gated channels).
        reduction: bottleneck reduction ratio of the excitation MLP.
    """

    def __init__(self, channel, reduction=4):
        super(SENET, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool1d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, channel // reduction, bias=False),
            # Fix: without a nonlinearity, two stacked Linear layers
            # collapse into a single rank-limited affine map; the SE
            # design puts a ReLU between the squeeze and excite FCs.
            nn.ReLU(inplace=True),
            nn.Linear(channel // reduction, channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        """Return x rescaled by learned per-channel gates; same shape as x."""
        b, t, _ = x.size()
        y = self.avg_pool(x).view(b, t)   # squeeze: (b, t)
        y = self.fc(y).view(b, t, 1)      # excite:  (b, t, 1), values in (0, 1)
        return x * y.expand_as(x)


class MHAttention(nn.Module):
    """Multi-head self-attention with learned Q/K/V projections.

    Projects the input to Q, K, V, runs nn.MultiheadAttention
    (batch_first), and projects the result back to ``input_dim``.

    Args:
        input_dim: feature size of the input (and output).
        num_hiddens: inner embedding size of the attention.
        num_head: number of attention heads (must divide num_hiddens).
        dropout: attention dropout probability.
        bias: whether the four projection layers carry a bias term.
            Fix: this flag was previously accepted but never used; it is
            now forwarded to the Linear layers. The default is True to
            match the old effective behaviour (nn.Linear's default).
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3, bias=True):
        super(MHAttention, self).__init__()
        self.W_q = nn.Linear(input_dim, num_hiddens, bias=bias)
        self.W_k = nn.Linear(input_dim, num_hiddens, bias=bias)
        self.W_v = nn.Linear(input_dim, num_hiddens, bias=bias)
        self.attn = nn.MultiheadAttention(embed_dim=num_hiddens, num_heads=num_head,
                                          dropout=dropout, batch_first=True)
        self.W_o = nn.Linear(num_hiddens, input_dim, bias=bias)

    def forward(self, x):
        """Self-attend over x of shape (batch, seq, input_dim); same shape out."""
        attn_output, _ = self.attn(self.W_q(x), self.W_k(x), self.W_v(x))
        return self.W_o(attn_output)


class AttentionBlock(nn.Module):
    """Post-norm transformer encoder block.

    Multi-head self-attention followed by a position-wise feed-forward
    net (dim -> 2*dim -> dim with ReLU), each wrapped in a residual
    connection and a LayerNorm applied after the addition.
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3):
        super(AttentionBlock, self).__init__()
        # NOTE: submodule registration order is kept identical to the
        # original so parameter initialisation under a fixed seed matches.
        self.attn = MHAttention(input_dim, num_hiddens, num_head, dropout)
        self.norm1 = nn.LayerNorm(input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        self.fc = nn.Sequential(
            nn.Linear(input_dim, input_dim * 2),
            nn.ReLU(),
            nn.Linear(input_dim * 2, input_dim),
        )

    def forward(self, x):
        # Residual self-attention, then post-norm.
        attended = self.norm1(x + self.attn(x))
        # Residual feed-forward, then post-norm.
        return self.norm2(attended + self.fc(attended))


class MODEL(nn.Module):
    """Regression head over windowed sequence features.

    Pipeline: (attention block + SE gate over the window axis) summed,
    then a 1-D conv stack that halves the feature length three times and
    maps ``window`` channels down to 3, then an MLP to a single scalar.

    Args:
        input_dim: per-step feature size (last dim of the input).
        num_hiddens: inner size of the attention block.
        num_head: number of attention heads.
        window: number of time steps (dim 1 of the input).
        dropout: attention dropout probability.
    """

    def __init__(self, input_dim, num_hiddens, num_head, window, dropout=0.3):
        super(MODEL, self).__init__()
        self.se = SENET(channel=window)
        self.attn = nn.Sequential(
            # Fix: `dropout` was previously not forwarded (the defaults
            # merely happened to coincide).
            AttentionBlock(input_dim, num_hiddens, num_head, dropout)
        )
        self.cnn = nn.Sequential(
            nn.Conv1d(in_channels=window, out_channels=window * 2, kernel_size=7, stride=2, padding=3),
            nn.ReLU(),
            nn.BatchNorm1d(num_features=window * 2),
            nn.MaxPool1d(kernel_size=3, stride=1, padding=1),
            nn.Conv1d(in_channels=window * 2, out_channels=window * 4, kernel_size=5, stride=2, padding=2),
            nn.ReLU(),
            nn.BatchNorm1d(num_features=window * 4),
            nn.MaxPool1d(kernel_size=3, stride=1, padding=1),
            nn.Conv1d(in_channels=window * 4, out_channels=window, kernel_size=3, stride=2, padding=1),
            nn.ReLU(),
            nn.BatchNorm1d(window),
            nn.Conv1d(in_channels=window, out_channels=3, kernel_size=3, stride=1, padding=1),
        )
        # Each stride-2 conv above uses "same"-style padding and maps a
        # length L to ceil(L / 2); three of them plus the final 3-channel
        # conv give 3 * ceil(ceil(ceil(input_dim/2)/2)/2) flattened
        # features. This was hard-coded as 768, valid only for
        # input_dim == 2048; compute it so other input sizes work too.
        feat_len = input_dim
        for _ in range(3):
            feat_len = (feat_len + 1) // 2
        self.fc = nn.Sequential(
            nn.Linear(3 * feat_len, 256),
            nn.LeakyReLU(),
            nn.Linear(256, 64),
            nn.LeakyReLU(),
            nn.Linear(64, 1)
        )

    def forward(self, x):
        """Map x of shape (batch, window, input_dim) to (batch, 1)."""
        x = self.attn(x) + self.se(x)           # fuse attention and SE paths
        x = self.cnn(x)                         # (batch, 3, feat_len)
        x = x.contiguous().view(x.shape[0], -1) # flatten for the MLP
        x = self.fc(x)
        return x


if __name__ == '__main__':
    # Smoke test: push one random batch through the model and print the
    # output shape (expected: torch.Size([16, 1])).
    input_dim = 2048
    num_hiddens = 1024
    num_head = 1
    dropout = 0.3
    window = 30
    model = MODEL(input_dim, num_hiddens, num_head, window)
    batch = torch.randn(size=(16, window, input_dim))
    out = model(batch)
    print(out.shape)
