import torch
import torch.nn as nn
import torch.nn.functional as F
from functools import partial

nonlinearity = partial(F.relu, inplace=True)


class GRU(nn.Module):
    """Unidirectional GRU sequence-to-sequence regressor.

    Maps an input of shape [B, 1, in_seq_len] to an output of shape
    [B, 1, out_seq_len]: a 2-layer GRU (hidden size 1) runs over the
    sequence, the per-step outputs are flattened and fed through a
    3-layer MLP head, and a final tanh squashes values into (-1, 1).

    Note: fc1 expects exactly ``in_seq_len`` flattened GRU outputs, so
    the input's last dimension must equal ``in_seq_len``.
    """

    def __init__(self, in_seq_len=100, out_seq_len=184):
        super(GRU, self).__init__()
        self.in_seq_len = in_seq_len
        self.out_seq_len = out_seq_len
        self.hidden_size = 1
        self.num_layers = 2
        self.gru = nn.GRU(input_size=1, hidden_size=self.hidden_size,
                          num_layers=self.num_layers, bidirectional=False)
        # Fully-connected head: flattened GRU outputs -> out_seq_len values.
        # nn.ReLU modules replace the module-level F.relu partial; behavior
        # and state_dict keys are unchanged (ReLU has no parameters).
        self.fc1 = nn.Linear(in_seq_len, 512)
        self.relu1 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(512, 256)
        self.relu2 = nn.ReLU(inplace=True)
        self.fc3 = nn.Linear(256, out_seq_len)
        # Kept for interface compatibility; not used in forward().
        self.relu3 = nn.ReLU(inplace=True)

    def forward(self, X):
        """Run the network.

        Args:
            X: tensor of shape [B, C=1, W=in_seq_len].

        Returns:
            Tensor of shape [B, 1, out_seq_len] with values in (-1, 1).
        """
        batch_size = X.shape[0]
        # [B, C, W] -> [W, B, C]: nn.GRU expects (seq_len, batch, features).
        seq = X.permute(2, 0, 1)
        # The initial hidden state defaults to zeros inside nn.GRU, so no
        # explicit state tensors are needed.  The original hand-built
        # hidden/cell tensors were never passed to the GRU and their
        # unconditional .cuda() broke CPU-only execution; removed.
        outputs, _ = self.gru(seq)
        # [W, B, H] -> [B, W, H] -> [B, W*H]; H == 1, so this is [B, in_seq_len].
        outputs = outputs.permute(1, 0, 2)
        outputs = outputs.reshape(batch_size, -1)

        outputs = self.fc1(outputs)
        outputs = self.relu1(outputs)
        outputs = self.fc2(outputs)
        outputs = self.relu2(outputs)
        outputs = self.fc3(outputs)
        # tanh bounds the regression output to (-1, 1).
        outputs = torch.tanh(outputs)

        return outputs.view(batch_size, 1, self.out_seq_len)


class BiGRU(nn.Module):
    """Bidirectional GRU sequence-to-sequence regressor.

    Maps an input of shape [B, 1, in_seq_len] to an output of shape
    [B, 1, out_seq_len]: a 4-layer bidirectional GRU (hidden size 1)
    runs over the sequence, the per-step outputs (2 directions) are
    flattened and fed through a 3-layer MLP head, and a final tanh
    squashes values into (-1, 1).

    Note: fc1 expects exactly ``in_seq_len * 2`` flattened GRU outputs
    (hidden_size * num_directions per step), so the input's last
    dimension must equal ``in_seq_len``.
    """

    def __init__(self, in_seq_len=100, out_seq_len=184):
        super(BiGRU, self).__init__()
        self.in_seq_len = in_seq_len
        self.out_seq_len = out_seq_len
        self.hidden_size = 1
        self.num_layers = 4
        # NOTE(review): attribute is named `lstm` but holds an nn.GRU;
        # the name is kept so existing checkpoints/state_dicts still load.
        self.lstm = nn.GRU(input_size=1, hidden_size=self.hidden_size,
                           num_layers=self.num_layers, bidirectional=True)

        # Fully-connected head: flattened bidirectional GRU outputs
        # (in_seq_len steps x 2 directions) -> out_seq_len values.
        # nn.ReLU modules replace the module-level F.relu partial; behavior
        # and state_dict keys are unchanged (ReLU has no parameters).
        self.fc1 = nn.Linear(in_seq_len * 2, 512)
        self.relu1 = nn.ReLU(inplace=True)
        self.fc2 = nn.Linear(512, 256)
        self.relu2 = nn.ReLU(inplace=True)
        self.fc3 = nn.Linear(256, out_seq_len)
        # Kept for interface compatibility; not used in forward().
        self.relu3 = nn.ReLU(inplace=True)

    def forward(self, X):
        """Run the network.

        Args:
            X: tensor of shape [B, C=1, W=in_seq_len].

        Returns:
            Tensor of shape [B, 1, out_seq_len] with values in (-1, 1).
        """
        batch_size = X.shape[0]
        # [B, C, W] -> [W, B, C]: nn.GRU expects (seq_len, batch, features).
        seq = X.permute(2, 0, 1)
        # The initial hidden state defaults to zeros inside nn.GRU, so no
        # explicit state tensors are needed.  The original hand-built
        # hidden/cell tensors were never passed to the GRU and their
        # unconditional .cuda() broke CPU-only execution; removed.
        outputs, _ = self.lstm(seq)
        # [W, B, 2H] -> [B, W, 2H] -> [B, W*2H]; H == 1, so [B, in_seq_len*2].
        outputs = outputs.permute(1, 0, 2)
        outputs = outputs.reshape(batch_size, -1)

        outputs = self.fc1(outputs)
        outputs = self.relu1(outputs)
        outputs = self.fc2(outputs)
        outputs = self.relu2(outputs)
        outputs = self.fc3(outputs)
        # tanh bounds the regression output to (-1, 1).
        outputs = torch.tanh(outputs)

        return outputs.view(batch_size, 1, self.out_seq_len)

