from torch import nn
import torch
"""
    input_size
    hidden_size
    num_layers
    bidirectional
"""

class MHAttention(nn.Module):
    """Multi-head self-attention with learned input/output projections.

    Projects the input to ``num_hiddens`` dimensions for Q/K/V, applies
    ``nn.MultiheadAttention`` (batch_first), then projects back to
    ``input_dim`` so the output can be used in a residual connection.

    Args:
        input_dim: feature size of the input (last dimension).
        num_hiddens: embedding size inside the attention; must be divisible
            by ``num_head``.
        num_head: number of attention heads.
        dropout: attention dropout probability.
        bias: whether the projections carry bias terms. NOTE: previously this
            argument was accepted but silently ignored and every layer used a
            bias; it is now wired through, with the default (``True``)
            matching the old effective behavior.
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3, bias=True):
        super().__init__()
        self.W_q = nn.Linear(input_dim, num_hiddens, bias=bias)
        self.W_k = nn.Linear(input_dim, num_hiddens, bias=bias)
        self.W_v = nn.Linear(input_dim, num_hiddens, bias=bias)
        self.attn = nn.MultiheadAttention(embed_dim=num_hiddens,
                                          num_heads=num_head,
                                          dropout=dropout,
                                          bias=bias,
                                          batch_first=True)
        self.W_o = nn.Linear(num_hiddens, input_dim, bias=bias)

    def forward(self, x):
        """Self-attend over ``x`` (batch, seq, input_dim); same output shape."""
        attn_output, _ = self.attn(self.W_q(x), self.W_k(x), self.W_v(x))
        return self.W_o(attn_output)

class AttentionBlock(nn.Module):
    """Transformer-style encoder block.

    Self-attention followed by a position-wise feed-forward network, each
    wrapped in a residual connection with post-LayerNorm.
    """

    def __init__(self, input_dim, num_hiddens, num_head, dropout=0.3):
        super(AttentionBlock, self).__init__()
        self.attn = MHAttention(input_dim, num_hiddens, num_head, dropout)
        self.norm1 = nn.LayerNorm(input_dim)
        self.norm2 = nn.LayerNorm(input_dim)
        # Feed-forward with a 2x hidden expansion, mapping back to input_dim.
        self.fc = nn.Sequential(
            nn.Linear(input_dim, input_dim * 2),
            nn.ReLU(),
            nn.Linear(input_dim * 2, input_dim),
        )

    def forward(self, x):
        # Residual + norm around the attention sub-layer.
        attended = self.norm1(x + self.attn(x))
        # Residual + norm around the feed-forward sub-layer.
        return self.norm2(attended + self.fc(attended))

class myLSTM(nn.Module):
    """Thin wrapper around ``nn.LSTM`` returning (per-step outputs, final hidden)."""

    def __init__(self, hidden_size, input_size, num_layers, bidirectional=False):
        super(myLSTM, self).__init__()
        self.net = nn.LSTM(
            input_size=input_size,
            hidden_size=hidden_size,
            num_layers=num_layers,
            bidirectional=bidirectional,
            batch_first=True,
        )

    def forward(self, x):
        # Drop the cell state; callers only need outputs and the last hidden state.
        seq_out, (h_n, _c_n) = self.net(x)
        return seq_out, h_n

class MODEL(nn.Module):
    """LSTM encoder plus an MLP head producing one scalar per sequence.

    The final hidden states from every LSTM layer (and direction) are
    flattened per batch element and passed through a small fully-connected
    head ending in a single output unit.

    Args:
        hidden_size: size of the LSTM hidden state.
        input_size: feature size of each timestep.
        num_layers: number of stacked LSTM layers.
        bidirectional: whether the LSTM runs in both directions.
    """

    def __init__(self, hidden_size, input_size, num_layers, bidirectional=False):
        super(MODEL, self).__init__()
        self.lstm = myLSTM(input_size=input_size,
                           hidden_size=hidden_size,
                           num_layers=num_layers,
                           bidirectional=bidirectional)
        # BUGFIX: h_n has num_layers * num_directions entries, so the head's
        # input width must include the direction count. The old code sized it
        # as num_layers * hidden_size and crashed when bidirectional=True.
        num_directions = 2 if bidirectional else 1
        self.fc = nn.Sequential(
            nn.Linear(num_layers * num_directions * hidden_size, 50),
            nn.LeakyReLU(),
            nn.Dropout(p=0.3),
            nn.Linear(50, 1),
        )

    def forward(self, x):
        # h_n: (num_layers * num_directions, batch, hidden_size)
        _, h_n = self.lstm(x)
        # -> (batch, num_layers * num_directions * hidden_size)
        h_n = h_n.transpose(0, 1).contiguous()
        flat = h_n.view(h_n.size(0), -1)
        return self.fc(flat)

def main():
    """Smoke-test MODEL on a random batch (batch=20, seq=50, features=2048)."""
    batch = torch.randn(20, 50, 2048)
    input_size = 2048
    hidden_size = 50
    num_layers = 2
    bidirectional = False
    model = MODEL(input_size=input_size,
                  hidden_size=hidden_size,
                  num_layers=num_layers,
                  bidirectional=bidirectional)
    model(batch)


if __name__ == '__main__':
    main()