import torch
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence

class GRU(torch.nn.Module):
    """Single-layer GRU classifier/regressor over variable-length sequences.

    The final hidden state of the GRU is passed through a linear head.
    The head's activation depends on configuration:
      * ``output_dim > 1``  -> softmax over classes
      * ``output_dim == 1`` and ``reg`` is False -> sigmoid (binary prob.)
      * ``output_dim == 1`` and ``reg`` is True  -> raw score (regression)
    """

    def __init__(self, input_dim=35, hidden_dim=64, output_dim=1, keep_prob=0.5, learning_rate=1e-3, reg=False):
        """
        Args:
            input_dim: Number of features per timestep.
            hidden_dim: GRU hidden-state size.
            output_dim: Size of the output layer (1 = binary/regression).
            keep_prob: Keep probability; dropout rate is ``1 - keep_prob``.
            learning_rate: Unused; accepted for backward compatibility with
                existing callers (the optimizer is constructed elsewhere).
            reg: If True (and output_dim == 1), skip the sigmoid and return
                a raw regression score.
        """
        super().__init__()
        self.input_dim = input_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.keep_prob = keep_prob
        self.reg = reg
        self.gru = torch.nn.GRU(self.input_dim, self.hidden_dim, batch_first=True)
        self.l_out = torch.nn.Linear(self.hidden_dim, self.output_dim)
        self.sigmoid = torch.nn.Sigmoid()
        # nn.Dropout takes a *drop* probability, hence the 1 - keep_prob.
        self.dropout = torch.nn.Dropout(p=1 - keep_prob)

    def forward(self, x, lens):
        """Run the GRU over a padded batch and return the head's output.

        Args:
            x: Padded input of shape ``(batch, max_seq_len, input_dim)``.
            lens: Sequence lengths, sorted in decreasing order (required by
                ``pack_padded_sequence`` with its default
                ``enforce_sorted=True``); must live on the CPU.

        Returns:
            dict with key ``'output'`` holding a ``(batch, output_dim)``
            tensor (softmax probs, sigmoid probs, or raw scores — see class
            docstring).
        """
        # Pack so the GRU skips padding timesteps.
        packed = pack_padded_sequence(x, lens, batch_first=True)
        _, hidden_t = self.gru(packed)
        # hidden_t is (num_layers, batch, hidden_dim); take the last layer.
        # NOTE: indexing (not .squeeze()) keeps the batch dimension intact
        # even when batch size is 1 — .squeeze() would collapse it.
        hn = hidden_t[-1]
        if self.keep_prob < 1.0:
            hn = self.dropout(hn)  # no-op in eval() mode
        o = self.l_out(hn)

        if self.output_dim > 1:
            o = F.softmax(o, dim=1)
        elif not self.reg:
            o = self.sigmoid(o)

        return {'output': o}