import torch
from torch import nn


class GRU(nn.Module):
    """A single-layer, unidirectional GRU implemented from scratch.

    Parameters are held as explicit ``nn.Parameter`` tensors for the reset
    gate, update gate, and candidate hidden state, rather than using
    ``nn.GRU``.

    NOTE: ``num_layers`` and ``bidirectional`` are accepted for API
    compatibility but are currently ignored — exactly one forward-direction
    layer is built. TODO: implement or explicitly reject other values.
    """

    def __init__(self, vocab_size, hidden_size, num_layers=1, bidirectional=False, device=None):
        super().__init__()

        # Fall back to CUDA when available, else CPU.
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")

        def three(vocab_size, hidden_size):
            # One gate's parameter triple: input-to-hidden weight,
            # hidden-to-hidden weight, and bias.
            # Weights are scaled by 0.01 — unscaled std-1 init saturates the
            # sigmoid/tanh activations and stalls training. Biases start at
            # zero, the conventional choice. nn.Parameter enables grad itself,
            # so no explicit requires_grad is needed.
            return (
                nn.Parameter(torch.randn((vocab_size, hidden_size), device=self.device) * 0.01),
                nn.Parameter(torch.randn((hidden_size, hidden_size), device=self.device) * 0.01),
                nn.Parameter(torch.zeros(hidden_size, device=self.device)),
            )

        # Reset gate and update gate parameters.
        self.w_xr, self.w_hr, self.b_r = three(vocab_size, hidden_size)
        self.w_xz, self.w_hz, self.b_z = three(vocab_size, hidden_size)

        # Candidate hidden state parameters.
        self.w_xh, self.w_hh, self.b_h = three(vocab_size, hidden_size)

        # Output projection: hidden state -> vocabulary logits.
        self.w_xq = nn.Parameter(torch.randn((hidden_size, vocab_size), device=self.device) * 0.01)
        self.b_q = nn.Parameter(torch.zeros(vocab_size, device=self.device))

    def forward(self, x, H):
        """Run the GRU over a sequence.

        Args:
            x: input of shape (seq_len, batch, vocab_size) — iterated over
               the first (time) dimension.
            H: initial hidden state of shape (batch, hidden_size).

        Returns:
            A tuple ``(outputs, H)`` where ``outputs`` has shape
            (seq_len, batch, vocab_size) and ``H`` is the final hidden state.
        """
        outputs = []
        for item in x:
            # Reset gate: how much of the previous state enters the candidate.
            R = torch.sigmoid(item @ self.w_xr + H @ self.w_hr + self.b_r)
            # Update gate: how much of the previous state is kept.
            Z = torch.sigmoid(item @ self.w_xz + H @ self.w_hz + self.b_z)
            # Candidate hidden state, built from the reset-scaled old state.
            H_hat = torch.tanh(item @ self.w_xh + (H * R) @ self.w_hh + self.b_h)
            # New hidden state: convex combination of old state and candidate.
            H = Z * H + (1 - Z) * H_hat
            # Project the hidden state to output logits for this time step.
            y_hat = H @ self.w_xq + self.b_q
            outputs.append(y_hat)

        return torch.stack(outputs, dim=0), H


if __name__ == '__main__':
    # Smoke test: run a random batch through the model and print the
    # output shape. Pick CUDA only when available — the original
    # hard-coded torch.device("cuda") and crashed on CPU-only machines.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    inputs = torch.randn(5, 3, 10, device=device)   # (seq_len, batch, vocab_size)
    h0 = torch.randn(3, 20, device=device)          # (batch, hidden_size)
    model = GRU(10, 20, device=device)
    outputs, _ = model(inputs, h0)
    print(outputs.shape)  # expected: torch.Size([5, 3, 10])
