import torch
from torch import nn


class RNN(nn.Module):
    """Character-level recurrent model: an ``nn.RNN`` over the time axis
    followed by a linear decoder mapping hidden states back to vocab logits.
    """

    def __init__(self, vocab_size, hidden_size, num_layers=1, device=None):
        """
        Args:
            vocab_size: width of the one-hot / feature input and of the output logits.
            hidden_size: dimensionality of the recurrent hidden state.
            num_layers: number of stacked RNN layers.
            device: target device; defaults to CUDA when available, else CPU.
        """
        super().__init__()
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size

        # Resolve the device once so all submodules land on the same one.
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")

        # BUG FIX: previously these layers received the raw `device` argument,
        # so with device=None they stayed on the default device even though
        # self.device reported CUDA. Use the resolved self.device instead.
        self.rnn = nn.RNN(vocab_size, hidden_size, num_layers, device=self.device)  # hidden-state recurrence over time steps
        self.fc = nn.Linear(hidden_size, vocab_size, device=self.device)  # hidden state -> output logits

    def forward(self, x, h):
        """Run the RNN over a sequence and decode every time step.

        Args:
            x: 3-D input of shape (seq_len, batch_size, vocab_size).
            h: 3-D initial hidden state of shape (num_layers, batch_size, hidden_size).

        Returns:
            Tuple of (logits with the same shape as ``x``, final hidden state).
        """
        # Move inputs onto the model's device; a no-op when already there.
        x = x.to(self.device)
        h = h.to(self.device)
        out, h = self.rnn(x, h)  # out: (seq_len, batch_size, hidden_size)
        out = out.reshape(-1, self.hidden_size)  # (seq_len * batch_size, hidden_size)
        y = self.fc(out)
        # Logit width equals vocab_size, so y folds back to x's exact shape.
        return y.reshape(x.shape), h


if __name__ == '__main__':
    # Smoke test: verify that output/hidden-state shapes round-trip.
    vocab_size = 10
    hidden_size = 20
    num_layers = 1  # named instead of hard-coding `1` into h0's shape below
    batch_size = 3
    seq_len = 6

    # Resolve the device explicitly so the input tensors and the model
    # parameters are guaranteed to live on the same device.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    inputs = torch.randn(seq_len, batch_size, vocab_size, device=device)  # (6, 3, 10)
    h0 = torch.randn(num_layers, batch_size, hidden_size, device=device)  # (1, 3, 20)

    model = RNN(vocab_size, hidden_size, num_layers, device=device)
    outputs, h = model(inputs, h0)

    print(outputs.shape, h.shape)  # torch.Size([6, 3, 10]) torch.Size([1, 3, 20])
