import numpy as np


class RNN:
    """Minimal Elman RNN trained with backpropagation through time (BPTT).

    All vectors are column vectors: each input is an ``(input_dim, 1)``
    array, hidden states are ``(hidden_dim, 1)``, outputs ``(output_dim, 1)``.
    Training minimizes the per-timestep mean squared error and applies a
    plain SGD step with element-wise gradient clipping.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, lr=0.01):
        """Initialize parameters with small random weights and zero biases.

        Parameters
        ----------
        input_dim : int
            Dimensionality of each input vector.
        hidden_dim : int
            Number of hidden units.
        output_dim : int
            Dimensionality of each output vector.
        lr : float
            Learning rate used by `backward` when applying the update.
        """
        # Small random init keeps tanh near its linear regime early on.
        self.w = np.random.randn(hidden_dim, input_dim) * 0.01   # input -> hidden
        self.v = np.random.randn(hidden_dim, hidden_dim) * 0.01  # hidden -> hidden (recurrent)
        self.b = np.zeros((hidden_dim, 1))                       # hidden bias

        self.z = np.random.randn(output_dim, hidden_dim) * 0.01  # hidden -> output
        self.c = np.zeros((output_dim, 1))                       # output bias

        self.lr = lr
        self.hidden_dim = hidden_dim
        self.h_cur = np.zeros((hidden_dim, 1))  # most recently computed hidden state

        # NOTE(review): never read or written by any method here — intermediates
        # are passed explicitly between train/forward/backward instead. Kept
        # only so the attribute set stays backward-compatible.
        self.inputs = {}
        self.h_states = {}
        self.outputs = {}

    def forward(self, x, h_pre):
        """Advance the recurrence by one timestep.

        Parameters
        ----------
        x : (input_dim, 1) array -- input at the current timestep.
        h_pre : (hidden_dim, 1) array -- hidden state from the previous step.

        Returns
        -------
        (hidden_dim, 1) array -- new hidden state (also cached on ``self.h_cur``).
        """
        u = np.dot(self.w, x) + np.dot(self.v, h_pre) + self.b
        self.h_cur = np.tanh(u)  # tanh activation
        return self.h_cur

    def output(self, h):
        """Map a hidden state to the output layer (affine, no activation).

        Parameters
        ----------
        h : (hidden_dim, 1) array -- hidden state.

        Returns
        -------
        (output_dim, 1) array -- raw output.
        """
        return np.dot(self.z, h) + self.c

    def backward(self, x_seq, y_true_seq, y_pred_seq, h_seq):
        """Run BPTT over one sequence and apply an SGD update in place.

        Parameters
        ----------
        x_seq : sequence of (input_dim, 1) arrays -- inputs.
        y_true_seq : sequence of (output_dim, 1) arrays -- targets.
        y_pred_seq : sequence of (output_dim, 1) arrays -- predictions.
        h_seq : sequence of (hidden_dim, 1) arrays -- hidden state *after*
            consuming x_seq[t]; the state before the first input is assumed
            to be all zeros (matching `train`).

        Returns
        -------
        float -- mean per-timestep MSE loss over the sequence.
        """
        time_steps = len(x_seq)
        dw = np.zeros_like(self.w)
        dv = np.zeros_like(self.v)
        db = np.zeros_like(self.b)
        dz = np.zeros_like(self.z)
        dc = np.zeros_like(self.c)

        # Gradient flowing into h_t from the future (zero at the last step).
        dh_next = np.zeros_like(h_seq[0])

        # State preceding the first input; makes the t == 0 step uniform
        # with t > 0 (its dv contribution is exactly zero).
        h_init = np.zeros((self.hidden_dim, 1))

        losses = []

        # Walk the timesteps in reverse.
        for t in reversed(range(time_steps)):
            err = y_pred_seq[t] - y_true_seq[t]
            losses.append(np.mean(err ** 2))

            # d(MSE-ish loss)/d(y_pred); matches the original's 2*(pred - true).
            dy = 2 * err

            # Output-layer gradients.
            dz += np.dot(dy, h_seq[t].T)
            dc += dy

            # Gradient w.r.t. h_t: contribution from the output plus the future.
            dh = np.dot(self.z.T, dy) + dh_next

            # Back through tanh: d tanh(u)/du = 1 - tanh(u)^2.
            dtanh = (1 - h_seq[t] ** 2) * dh

            h_prev = h_seq[t - 1] if t > 0 else h_init

            db += dtanh
            dw += np.dot(dtanh, x_seq[t].T)
            dv += np.dot(dtanh, h_prev.T)
            dh_next = np.dot(self.v.T, dtanh)

        # Element-wise gradient clipping to limit exploding gradients.
        for grad in (dw, dv, db, dz, dc):
            np.clip(grad, -1, 1, out=grad)

        # Vanilla SGD step.
        self.w -= self.lr * dw
        self.v -= self.lr * dv
        self.b -= self.lr * db
        self.z -= self.lr * dz
        self.c -= self.lr * dc

        return np.mean(losses)

    def train(self, x_seq, y_seq, epochs=100):
        """Train on a single sequence for a number of epochs.

        Parameters
        ----------
        x_seq : sequence of (input_dim, 1) arrays -- input sequence.
        y_seq : sequence of (output_dim, 1) arrays -- target sequence.
        epochs : int -- number of full passes over the sequence.

        Returns
        -------
        list of float -- mean loss per epoch.
        """
        loss_history = []

        for epoch in range(epochs):
            # Hidden state is reset to zeros at the start of every epoch.
            h = np.zeros((self.hidden_dim, 1))
            h_seq = [h]
            y_pred_seq = []

            # Forward pass through the whole sequence.
            for t in range(len(x_seq)):
                h = self.forward(x_seq[t], h)
                y_pred_seq.append(self.output(h))
                h_seq.append(h)

            # Backward pass; h_seq[1:] drops the initial zero state so that
            # backward's h_seq[t] is the state after consuming x_seq[t].
            loss = self.backward(x_seq, y_seq, y_pred_seq, h_seq[1:])
            loss_history.append(loss)

            if epoch % 10 == 0:
                print(f"Epoch {epoch}, Loss: {loss}")

        return loss_history

    def predict(self, x_seq):
        """Run the model over a sequence starting from a zero hidden state.

        Parameters
        ----------
        x_seq : sequence of (input_dim, 1) arrays -- input sequence.

        Returns
        -------
        list of (output_dim, 1) arrays -- one prediction per timestep.
        """
        h = np.zeros((self.hidden_dim, 1))
        y_pred_seq = []

        for t in range(len(x_seq)):
            h = self.forward(x_seq[t], h)
            y_pred_seq.append(self.output(h))

        return y_pred_seq