from torch import nn
import torch


class RNN(nn.Module):
    """Token-level RNN: an ``nn.RNN`` followed by a linear projection back to vocab size.

    Input to ``forward`` is expected as ``(seq_len, batch, vocab_size)`` (the
    nn.RNN default, ``batch_first=False``); the output is flattened to
    ``(seq_len * batch, vocab_size)`` logits.
    """

    def __init__(self, vocab_size, hidden_size, num_layers=1, bidirectional=False, device=None):
        """
        Args:
            vocab_size: input feature size and output (logit) size.
            hidden_size: hidden state size per direction.
            num_layers: number of stacked RNN layers.
            bidirectional: run the RNN in both directions and concatenate outputs.
            device: optional target device; defaults to CUDA when available.
                NOTE(review): the module is not moved to this device automatically —
                callers must do ``model.to(model.device)`` themselves.
        """
        super().__init__()
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # 2 when bidirectional, else 1 — used both for the FC input width and
        # for validating the initial hidden state in forward().
        self.num_directions = 2 if bidirectional else 1

        # Width of the RNN output features: doubled when bidirectional
        # (forward and backward hidden states are concatenated).
        self.bi_hidden_size = self.num_directions * hidden_size

        self.rnn = nn.RNN(vocab_size, hidden_size, num_layers, bidirectional=bidirectional)
        self.fc = nn.Linear(self.bi_hidden_size, vocab_size)

    def forward(self, x, h):
        """Run the RNN and project every timestep's output to vocab logits.

        Args:
            x: input tensor of shape ``(seq_len, batch, vocab_size)``.
            h: initial hidden state of shape
               ``(num_layers * num_directions, batch, hidden_size)``.

        Returns:
            Tuple of (logits of shape ``(seq_len * batch, vocab_size)``,
            final hidden state of the same shape as ``h``).

        Raises:
            ValueError: if ``h`` has the wrong leading dimension.
        """
        # Bug fix: the original asserted h.shape[0] == 2 * num_layers, which
        # hard-coded the bidirectional case and rejected valid hidden states
        # for unidirectional models. Validate against num_directions instead,
        # and raise explicitly (assert is stripped under ``python -O``).
        expected = self.num_directions * self.num_layers
        if h.shape[0] != expected:
            raise ValueError(
                f"expected h.shape[0] == {expected} "
                f"(num_layers * num_directions), got {h.shape[0]}"
            )
        result, h = self.rnn(x, h)
        # Flatten (seq_len, batch, features) -> (seq_len * batch, features)
        # so the linear layer produces per-timestep logits.
        result = result.reshape(-1, self.bi_hidden_size)
        return self.fc(result), h


if __name__ == '__main__':
    # Demo: bidirectional single-layer RNN over a random batch.
    seq_len, batch_size, vocab_size, hidden_size = 5, 3, 10, 20

    x = torch.randn(seq_len, batch_size, vocab_size)
    # Initial hidden state: (num_layers * num_directions, batch, hidden_size).
    h0 = torch.randn(2, batch_size, hidden_size)

    model = RNN(vocab_size, hidden_size, 1, bidirectional=True)
    outputs, h = model(x, h0)
    print(outputs.shape)  # torch.Size([15, 10]) == (seq_len * batch, vocab_size)
    print(h.shape)

    # Exercise: re-implement the same model using nn.RNNCell.