import torch
from torch import nn


class RNN(nn.Module):
    """Vanilla (Elman) RNN built from explicit weight matrices.

    Args:
        vocab_size: size of the input/output feature dimension.
        hidden_size: size of the hidden state.
        device: torch device for the parameters; defaults to CUDA when
            available, otherwise CPU.
    """

    def __init__(self, vocab_size, hidden_size, device=None):
        super().__init__()

        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")

        def init_weight(in_features, out_features):
            # Draw a random weight matrix directly on the target device.
            # No requires_grad here: nn.Parameter sets requires_grad=True
            # itself, so requesting it on the raw tensor is redundant.
            return torch.randn((in_features, out_features), device=self.device)

        # Wrapping in nn.Parameter registers the tensors with the module,
        # so they appear in model.parameters() and receive gradients.
        self.w_xh = nn.Parameter(init_weight(vocab_size, hidden_size))
        self.w_hh = nn.Parameter(init_weight(hidden_size, hidden_size))
        self.b_h = nn.Parameter(torch.randn((hidden_size,), device=self.device))

        self.w_hq = nn.Parameter(init_weight(hidden_size, vocab_size))
        self.b_q = nn.Parameter(torch.randn((vocab_size,), device=self.device))

    def forward(self, x, h):
        """Run the RNN over a full sequence.

        Args:
            x: input of shape (seq_len, batch_size, vocab_size).
            h: initial hidden state of shape (batch_size, hidden_size).

        Returns:
            Tuple ``(outputs, h)`` where ``outputs`` has shape
            (seq_len, batch_size, vocab_size) and ``h`` is the final
            hidden state (useful e.g. in encoder-decoder setups).
        """
        outputs = []
        for step in x:  # step: (batch_size, vocab_size)
            h = torch.tanh(step @ self.w_xh + h @ self.w_hh + self.b_h)
            outputs.append(h @ self.w_hq + self.b_q)
        return torch.stack(outputs, dim=0), h


# if __name__ == '__main__':
#     device = torch.device("cuda")
#     batch_size = 3
#     vocab_size = 10
#     inputs = torch.randn(5, batch_size, vocab_size, device=device)  # 5：数据的长度 3:批次长度（多少个数据） 10: 特征的数量（特征维度）
#
#     # hidden_size = ? (建议>= vocab_size)
#     hidden_size = 20
#     model = RNN(vocab_size, hidden_size, device=device)
#     # 隐藏状态初始值
#     h0 = torch.randn((batch_size, hidden_size), device=device)
#     results, h = model(inputs, h0)
#     print(results.shape, h.shape)
