import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial


# Module-level activation alias; in-place ReLU to avoid allocating a new tensor.
# NOTE(review): not used by the classes visible in this chunk — presumably
# consumed elsewhere in the file; confirm before removing.
nonlinearity = partial(F.relu, inplace=True)

class LSTM(nn.Module):
    """Unidirectional LSTM sequence-to-sequence regressor.

    Consumes a single-channel input sequence of length ``in_seq_len`` and
    produces a single-channel output sequence of length ``out_seq_len`` via
    a fully-connected head over the concatenated per-step LSTM outputs.

    Args:
        in_seq_len: length of the input sequence (last dim of the input).
        out_seq_len: length of the predicted output sequence.
    """

    def __init__(self, in_seq_len=100, out_seq_len=184):
        super(LSTM, self).__init__()
        self.in_seq_len = in_seq_len
        self.out_seq_len = out_seq_len
        self.hidden_size = 5
        self.num_layers = 2
        # seq-first (not batch_first) LSTM over a 1-feature sequence.
        self.lstm = nn.LSTM(input_size=1, hidden_size=self.hidden_size,
                            num_layers=self.num_layers, bidirectional=False)
        # FC head maps the flattened per-step outputs
        # (in_seq_len * hidden_size) down to out_seq_len values.
        self.fc = nn.Sequential(
            nn.Linear(in_seq_len * self.hidden_size, 512),
            nn.ReLU(inplace=True),
            nn.Linear(512, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, out_seq_len),
        )

    def forward(self, X):
        """Run the model.

        Args:
            X: tensor of shape ``(batch, 1, in_seq_len)``.

        Returns:
            Tensor of shape ``(batch, 1, out_seq_len)``.
        """
        batch_size = X.shape[0]
        # (B, C=1, T) -> (T, B, C): the LSTM expects seq-first input.
        seq = X.permute(2, 0, 1)
        # Allocate the initial states on the same device/dtype as the input
        # instead of hard-coding .cuda(), so the model also runs on CPU and
        # follows the input when it lives on a specific GPU.
        hidden_state = torch.zeros(self.num_layers, batch_size, self.hidden_size,
                                   device=X.device, dtype=X.dtype)
        cell_state = torch.zeros(self.num_layers, batch_size, self.hidden_size,
                                 device=X.device, dtype=X.dtype)

        outputs, _ = self.lstm(seq, (hidden_state, cell_state))
        # (T, B, H) -> (B, T, H) -> (B, T*H) for the fully-connected head.
        outputs = outputs.permute(1, 0, 2)
        outputs = outputs.reshape(batch_size, -1)

        outputs = self.fc(outputs)

        # Restore the single channel dimension expected by callers.
        outputs = outputs.view(batch_size, 1, self.out_seq_len)

        return outputs


class BiLSTM(nn.Module):
    """Bidirectional LSTM sequence-to-sequence regressor.

    Same structure as ``LSTM`` but the recurrent layer runs in both
    directions, doubling the per-step feature width fed to the FC head.

    Args:
        in_seq_len: length of the input sequence (last dim of the input).
        out_seq_len: length of the predicted output sequence.
    """

    def __init__(self, in_seq_len=100, out_seq_len=184):
        super(BiLSTM, self).__init__()
        self.in_seq_len = in_seq_len
        self.out_seq_len = out_seq_len
        self.hidden_size = 5
        self.num_layers = 2
        self.lstm = nn.LSTM(input_size=1, hidden_size=self.hidden_size,
                            num_layers=self.num_layers, bidirectional=True)

        # FC head over the flattened per-step outputs. A bidirectional LSTM
        # emits 2 * hidden_size features per step (num_directions == 2,
        # independent of num_layers). The original code used num_layers here,
        # which only matched because num_layers happened to equal 2; using the
        # direction count keeps the layer correct if num_layers changes.
        self.fc = nn.Sequential(
            nn.Linear(in_seq_len * 2 * self.hidden_size, 512),
            # inplace=True mutates the incoming tensor, saving one buffer.
            nn.ReLU(inplace=True),
            nn.Linear(512, 256),
            nn.ReLU(inplace=True),
            nn.Linear(256, out_seq_len)
        )

    def forward(self, X):
        """Run the model.

        Args:
            X: tensor of shape ``(batch, 1, in_seq_len)``.

        Returns:
            Tensor of shape ``(batch, 1, out_seq_len)``.
        """
        batch_size = X.shape[0]
        # (B, C=1, T) -> (T, B, C): the LSTM expects seq-first input.
        seq = X.permute(2, 0, 1)
        # States need num_layers * num_directions (=2) layer slots. Allocate
        # on the input's device/dtype instead of hard-coding .cuda() so the
        # model also runs on CPU and follows the input's GPU placement.
        hidden_state = torch.zeros(self.num_layers * 2, batch_size, self.hidden_size,
                                   device=X.device, dtype=X.dtype)
        cell_state = torch.zeros(self.num_layers * 2, batch_size, self.hidden_size,
                                 device=X.device, dtype=X.dtype)

        outputs, _ = self.lstm(seq, (hidden_state, cell_state))
        # (T, B, 2H) -> (B, T, 2H) -> (B, T*2H) for the fully-connected head.
        outputs = outputs.permute(1, 0, 2)
        outputs = outputs.reshape(batch_size, -1)

        outputs = self.fc(outputs)

        # Restore the single channel dimension expected by callers.
        outputs = outputs.view(batch_size, 1, self.out_seq_len)

        return outputs

