import torch
import torch.nn as nn
import torch.nn.functional as F



class MyLSTM(nn.Module):
    """Per-timestep scorer built on an LSTM.

    Input features are projected to ``d_model`` (Linear + ReLU), optionally
    offset by a learned token embedding, run through an LSTM, and mapped to
    a sigmoid score in (0, 1) for every timestep.
    """

    def __init__(self, feature_size, d_model, hidden_size, num_layers, num_token, dropout) -> None:
        """
        Args:
            feature_size: size of the last dimension of the input features.
            d_model: projection / embedding dimension fed to the LSTM.
            hidden_size: LSTM hidden state size.
            num_layers: number of stacked LSTM layers (note: PyTorch applies
                ``dropout`` only between layers, so it is a no-op when
                ``num_layers == 1``).
            num_token: vocabulary size of the optional token embedding.
            dropout: inter-layer LSTM dropout probability.
        """
        super().__init__()
        self.linear = nn.Sequential(nn.Linear(feature_size, d_model),
                                    nn.ReLU())
        self.tok_emb = nn.Embedding(num_token, d_model)

        self.lstm = nn.LSTM(d_model, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.out = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, token=None):
        """Score each timestep of ``x``.

        Args:
            x: float tensor of shape ``(batch, time, feature_size)``.
            token: optional int tensor of shape ``(batch,)`` — one token id
                per sample, added (as an embedding) to every timestep.

        Returns:
            Tensor of shape ``(batch, time, 1)`` with values in (0, 1).
        """
        x = self.linear(x)
        if token is not None:
            # Broadcast the (batch, d_model) embedding over the time axis.
            # Equivalent to the expand(t, b, k).transpose(0, 1) idiom for a
            # (batch,) token tensor, but clearer and relies on standard
            # broadcasting instead of a hand-built view.
            x = x + self.tok_emb(token).unsqueeze(1)
        x, _ = self.lstm(x)  # final (h, c) state is not used
        return self.sigmoid(self.out(x))


class MyGRU(nn.Module):
    """Per-timestep scorer built on a GRU.

    Mirrors the LSTM variant in this file: project features to ``d_model``,
    optionally add a learned token embedding at every timestep, run a GRU,
    and squash each step's output through a single sigmoid unit.
    """

    def __init__(self, feature_size, d_model, hidden_size, num_layers, num_token, dropout) -> None:
        super().__init__()
        # Feature projection into the model dimension.
        self.linear = nn.Sequential(
            nn.Linear(feature_size, d_model),
            nn.ReLU(),
        )
        # Optional per-sample token embedding, added to the projected input.
        self.tok_emb = nn.Embedding(num_token, d_model)
        self.gru = nn.GRU(d_model, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.out = nn.Linear(hidden_size, 1)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x, token=None):
        """Return a ``(batch, time, 1)`` sigmoid score for each timestep."""
        projected = self.linear(x)
        batch, steps, dim = projected.size()
        if token is None:
            combined = projected
        else:
            emb = self.tok_emb(token)
            # Tile the embedding across every timestep:
            # (batch, dim) -> (steps, batch, dim) -> (batch, steps, dim).
            # NOTE(review): this assumes token is a (batch,) tensor — confirm
            # against callers.
            tiled = emb.expand(steps, batch, dim).transpose(0, 1)
            combined = projected + tiled
        seq_out, _final_state = self.gru(combined)
        return self.sigmoid(self.out(seq_out))