import torch
from torch import nn


class LSTM(nn.Module):
    """A from-scratch LSTM built from explicit gate parameters.

    Each time step projects an input of size ``vocab_size`` to
    ``hidden_size`` through the three gates and the candidate cell, then a
    final linear layer maps the hidden state back to ``vocab_size`` logits.

    NOTE(review): ``num_layers`` and ``bidirectional`` are accepted but
    never used by this implementation — kept only for signature
    compatibility with ``nn.LSTM``-style callers.
    """

    def __init__(self, vocab_size, hidden_size, num_layers=1, bidirectional=False, device=None):
        super().__init__()

        # Default to CUDA when available, otherwise CPU.
        self.device = device if device else torch.device("cuda" if torch.cuda.is_available() else "cpu")

        def three(vocab_size, hidden_size):
            # One input->hidden weight, one hidden->hidden weight, one bias.
            # nn.Parameter already sets requires_grad=True, so passing it
            # explicitly was redundant.
            return (
                nn.Parameter(torch.randn((vocab_size, hidden_size), device=self.device)),
                nn.Parameter(torch.randn((hidden_size, hidden_size), device=self.device)),
                nn.Parameter(torch.randn((hidden_size,), device=self.device)),
            )

        # Input gate, forget gate, output gate parameters.
        self.w_xi, self.w_hi, self.b_i = three(vocab_size, hidden_size)
        self.w_xf, self.w_hf, self.b_f = three(vocab_size, hidden_size)
        self.w_xo, self.w_ho, self.b_o = three(vocab_size, hidden_size)

        # Candidate memory-cell parameters.
        self.w_xc, self.w_hc, self.b_c = three(vocab_size, hidden_size)

        # Output projection: hidden state -> vocabulary logits.
        self.w_xq = nn.Parameter(torch.randn((hidden_size, vocab_size), device=self.device))
        self.b_q = nn.Parameter(torch.randn((vocab_size,), device=self.device))

    def forward(self, x, state):
        """Run the LSTM over a sequence.

        Args:
            x: tensor of shape (seq_len, batch, vocab_size); iterated over
               the first (time) dimension.
            state: tuple ``(H, C)``, each of shape (batch, hidden_size).

        Returns:
            A tuple of the stacked per-step logits, shape
            (seq_len, batch, vocab_size), and the final state ``(H, C)``.
            BUG FIX: the original returned only ``H`` and dropped the memory
            cell ``C``, so the returned state could not be fed back into
            ``forward`` to continue a sequence.
        """
        H, C = state
        outputs = []
        for item in x:
            # Input gate
            I = torch.sigmoid(item @ self.w_xi + H @ self.w_hi + self.b_i)
            # Forget gate
            F = torch.sigmoid(item @ self.w_xf + H @ self.w_hf + self.b_f)
            # Output gate
            O = torch.sigmoid(item @ self.w_xo + H @ self.w_ho + self.b_o)
            # Candidate memory cell
            C_hat = torch.tanh(item @ self.w_xc + H @ self.w_hc + self.b_c)

            # New memory cell: keep part of the old cell (forget gate) and
            # admit part of the candidate (input gate).
            C = F * C + I * C_hat
            # Hidden state, gated by the output gate.
            H = O * torch.tanh(C)
            # Project the hidden state to output logits.
            y_hat = H @ self.w_xq + self.b_q
            outputs.append(y_hat)

        return torch.stack(outputs, dim=0), (H, C)


if __name__ == '__main__':
    # Fall back to CPU so the demo also runs on machines without CUDA
    # (the original hard-coded "cuda" and crashed on CPU-only hosts);
    # this matches the device-selection logic inside LSTM.__init__.
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    # (seq_len=5, batch=3, vocab_size=10)
    inputs = torch.randn(5, 3, 10, device=device)
    h0 = torch.randn(3, 20, device=device)
    c0 = torch.randn(3, 20, device=device)
    model = LSTM(10, 20, device=device)
    outputs, _ = model(inputs, (h0, c0))
    print(outputs.shape)
