from torch import nn
import torch


class GRU(nn.Module):
    """GRU language model: a (possibly bidirectional) GRU encoder followed by a
    linear projection back to vocabulary logits.

    Expects one-hot / feature inputs of width ``vocab_size`` (the GRU input
    size equals ``vocab_size``) and produces per-timestep logits over the
    vocabulary, flattened to ``(batch * seq_len, vocab_size)``.
    """

    def __init__(self, vocab_size, hidden_size, num_layers=1, bidirectional=False, device=None):
        """
        Args:
            vocab_size: input feature width and output logit width.
            hidden_size: GRU hidden state size per direction.
            num_layers: number of stacked GRU layers.
            bidirectional: if True, run forward and backward directions.
            device: target device; defaults to CUDA when available, else CPU.
        """
        super().__init__()
        # Default to GPU when one is available; callers may override explicitly.
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        self.bidirectional = bidirectional

        self.gru = nn.GRU(vocab_size, hidden_size, num_layers, batch_first=True,
                          bidirectional=bidirectional, device=self.device)
        # A bidirectional GRU concatenates forward and backward features,
        # doubling the width the projection layer must accept.
        self.fc = nn.Linear(hidden_size * 2 if bidirectional else hidden_size,
                            vocab_size, device=self.device)

    def forward(self, x, h):
        """Run the GRU over a batch of sequences.

        Args:
            x: input tensor of shape ``(batch, seq_len, vocab_size)``.
            h: initial hidden state of shape
               ``(num_layers * num_directions, batch, hidden_size)``.

        Returns:
            Tuple of (logits of shape ``(batch * seq_len, vocab_size)``,
            final hidden state).

        Raises:
            ValueError: if ``h`` has the wrong number of layer/direction slots.
        """
        x = x.to(self.device)
        # Bug fix: the hidden state must live on the same device as the model,
        # just like the input; previously only ``x`` was moved.
        h = h.to(self.device)
        batch_size, seq_len = x.shape[0], x.shape[1]

        # Bug fix: the original ``assert a == b if cond else c`` parsed as
        # ``assert (a == b) if cond else c`` — in the unidirectional case it
        # merely asserted the truthiness of ``num_layers`` and never checked
        # ``h``. Also, ``assert`` is stripped under ``-O``; raise instead.
        expected_layers = 2 * self.num_layers if self.bidirectional else self.num_layers
        if h.shape[0] != expected_layers:
            raise ValueError(
                f"expected h.shape[0] == {expected_layers}, got {h.shape[0]}"
            )

        result, h = self.gru(x, h)
        # Flatten time into the batch dimension so fc maps every timestep.
        result = result.reshape(batch_size * seq_len, -1)
        return self.fc(result), h
