import torch.nn as nn
import torch.nn.functional as F
import torch


class LstmModel(nn.Module):
    """LSTM sequence classifier.

    Runs a (optionally bidirectional) multi-layer LSTM over the input
    sequence, takes the output of the last time step, applies ReLU, and
    projects it to ``num_classes`` logits with a linear layer.
    """

    def __init__(self, input_size, num_classes, hidden_size=64, num_layers=2, dropout=0.5, bidirectional=False):
        """
        Args:
            input_size: number of features per time step.
            num_classes: size of the output (number of classes / logits).
            hidden_size: LSTM hidden-state size per direction.
            num_layers: number of stacked LSTM layers.
            dropout: dropout applied between LSTM layers (only effective
                when num_layers > 1).
            bidirectional: if True, use a bidirectional LSTM; the feature
                size fed to the final linear layer doubles accordingly.
        """
        super(LstmModel, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.input_size = input_size
        self.bidirectional = bidirectional

        self.lstm = nn.LSTM(input_size, hidden_size, num_layers, batch_first=True, dropout=dropout,
                            bidirectional=bidirectional)
        self.activation_func = nn.ReLU()
        # A bidirectional LSTM concatenates forward and backward features.
        self.fc = nn.Linear(hidden_size * 2 if bidirectional else hidden_size, num_classes)

    def forward(self, x):
        """Classify a batch of sequences.

        Args:
            x: tensor of shape (batch, seq_len, input_size), or
               (batch, seq_len) when input_size == 1 — a trailing feature
               dimension is added automatically in that case.

        Returns:
            Logits of shape (batch, num_classes).
        """
        if x.dim() == 2:
            # Interpret a 2-D input as (batch, seq_len) with one feature.
            x = x.unsqueeze(-1)
        # nn.LSTM initializes (h0, c0) to zeros with the input's device AND
        # dtype when no initial state is given; building them manually with
        # torch.zeros(...).to(x.device) would mismatch non-default dtypes
        # (e.g. float16) and raise at runtime.
        out, _ = self.lstm(x)  # out: (batch, seq_len, num_directions * hidden_size)
        # Decode only the hidden state of the last time step.
        return self.fc(self.activation_func(out[:, -1, :]))


if __name__ == '__main__':
    # Smoke-test LstmModel with a random batch.
    x = torch.randn(32, 10, 1)  # (batch_size, seq_length, input_size)
    model = LstmModel(input_size=1, hidden_size=20, num_layers=2, num_classes=1)
    output = model(x)
    print(output.shape)  # torch.Size([32, 1]) — (batch_size, num_classes)

