import numpy as np
from base_nn import Layer


class RNN(Layer):
    """Simple (vanilla) RNN layer.

    Processes input of shape (batch, seq, input_size). Returns the output of
    every time step when return_sequences=True, otherwise only the output of
    the final step.
    """

    def __init__(self, input_size, hidden_size, output_size, return_sequences=True):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.return_sequences = return_sequences  # whether to return outputs for all time steps

        # Xavier/Glorot-style weight initialization.
        self.W_xh = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_hh = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_hy = np.random.randn(output_size, hidden_size) * np.sqrt(2 / (hidden_size + output_size))

        # Biases start at zero.
        self.b_h = np.zeros((hidden_size, 1))
        self.b_y = np.zeros((output_size, 1))

        # Hidden states cached during forward for backpropagation through time.
        self.hidden_states = []
        self.h_prev = None

    def forward(self, input_data, h_prev=None):
        """Forward pass.

        Args:
            input_data: array of shape (batch, seq, input_size).
            h_prev: optional initial hidden state of shape (hidden_size, batch).

        Returns:
            (output, h_last): output is (batch, seq, output_size) when
            return_sequences else (batch, output_size); h_last is the final
            hidden state (hidden_size, batch).
        """
        self.input = input_data
        batch_size, seq_length, _ = input_data.shape

        # Initialize the hidden state if the caller did not supply one.
        if h_prev is None:
            h_prev = np.zeros((self.hidden_size, batch_size))
        self.h_prev = h_prev
        # hidden_states[0] is the initial state; hidden_states[t + 1] is h_t.
        self.hidden_states = [h_prev]

        # Pre-allocate the output buffer.
        if self.return_sequences:
            self.output = np.zeros((batch_size, seq_length, self.output_size))
        else:
            self.output = np.zeros((batch_size, self.output_size))

        # Process the sequence one time step at a time.
        for t in range(seq_length):
            x_t = input_data[:, t, :].T  # (input_size, batch)

            # Hidden-state recurrence: h_t = tanh(W_xh x_t + W_hh h_{t-1} + b_h).
            h_t = np.tanh(np.dot(self.W_xh, x_t) + np.dot(self.W_hh, h_prev) + self.b_h)

            # Output projection: y_t = W_hy h_t + b_y.
            y_t = np.dot(self.W_hy, h_t) + self.b_y

            if self.return_sequences:
                self.output[:, t, :] = y_t.T
            elif t == seq_length - 1:
                # Only keep the output of the final time step.
                self.output[:, :] = y_t.T

            self.hidden_states.append(h_t)
            h_prev = h_t

        return self.output, h_prev

    def backward(self, output_gradient, learning_rate):
        """Backpropagation through time; updates parameters via SGD.

        Args:
            output_gradient: gradient of the loss w.r.t. the layer output —
                (batch, seq, output_size) when return_sequences, otherwise
                (batch, output_size).
            learning_rate: SGD step size.

        Returns:
            Gradient w.r.t. the input, shape (batch, seq, input_size).

        Fixes vs. the previous version: seq_length/batch_size come from the
        cached input so BPTT covers every time step even when
        return_sequences=False, and the input gradient is accumulated per
        time step inside the loop instead of reusing the final dh_next.
        """
        batch_size, seq_length, _ = self.input.shape

        # Accumulators for parameter gradients.
        dW_xh = np.zeros_like(self.W_xh)
        dW_hh = np.zeros_like(self.W_hh)
        dW_hy = np.zeros_like(self.W_hy)
        db_h = np.zeros_like(self.b_h)
        db_y = np.zeros_like(self.b_y)

        # Gradient flowing into the hidden state from the future.
        dh_next = np.zeros((self.hidden_size, batch_size))
        input_gradient = np.zeros_like(self.input)

        # Walk the sequence backwards.
        for t in reversed(range(seq_length)):
            if self.return_sequences:
                dy_t = output_gradient[:, t, :].T  # (output_size, batch)
            elif t == seq_length - 1:
                dy_t = output_gradient.T  # only the last step produced output
            else:
                dy_t = np.zeros((self.output_size, batch_size))

            # Output-layer gradients.
            dW_hy += np.dot(dy_t, self.hidden_states[t + 1].T)
            db_y += np.sum(dy_t, axis=1, keepdims=True)

            # Hidden-state gradient: from the output plus from the future step.
            dh_t = np.dot(self.W_hy.T, dy_t) + dh_next

            # Backprop through tanh: d/dx tanh(x) = 1 - tanh(x)^2.
            dtanh = (1 - self.hidden_states[t + 1] ** 2) * dh_t

            # Hidden-layer gradients.
            db_h += np.sum(dtanh, axis=1, keepdims=True)
            dW_xh += np.dot(dtanh, self.input[:, t, :])
            dW_hh += np.dot(dtanh, self.hidden_states[t].T)

            # Gradient w.r.t. this step's input (fix: computed per time step).
            input_gradient[:, t, :] = np.dot(self.W_xh.T, dtanh).T

            # Propagate to the previous time step.
            dh_next = np.dot(self.W_hh.T, dtanh)

        # Average gradients over the batch.
        dW_xh /= batch_size
        dW_hh /= batch_size
        dW_hy /= batch_size
        db_h /= batch_size
        db_y /= batch_size

        # SGD parameter update.
        self.W_xh -= learning_rate * dW_xh
        self.W_hh -= learning_rate * dW_hh
        self.W_hy -= learning_rate * dW_hy
        self.b_h -= learning_rate * db_h
        self.b_y -= learning_rate * db_y

        return input_gradient

    def get_params(self):
        """Return copies of all trainable parameters."""
        return {
            'W_xh': self.W_xh.copy(),
            'W_hh': self.W_hh.copy(),
            'W_hy': self.W_hy.copy(),
            'b_h': self.b_h.copy(),
            'b_y': self.b_y.copy()
        }

    def set_params(self, params):
        """Load trainable parameters from a dict produced by get_params()."""
        self.W_xh = params['W_xh']
        self.W_hh = params['W_hh']
        self.W_hy = params['W_hy']
        self.b_h = params['b_h']
        self.b_y = params['b_y']


class LSTM(Layer):
    """LSTM layer with forget/input/output gates and a candidate cell state.

    Processes input of shape (batch, seq, input_size). Returns the output of
    every time step when return_sequences=True, otherwise only the output of
    the final step.
    """

    def __init__(self, input_size, hidden_size, output_size, return_sequences=True):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.return_sequences = return_sequences  # whether to return outputs for all time steps

        # Xavier/Glorot-style weight initialization (x->gate and h->gate for each gate).
        self.W_xf = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_hf = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_xi = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_hi = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_xo = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_ho = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_xc = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_hc = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_hy = np.random.randn(output_size, hidden_size) * np.sqrt(2 / (hidden_size + output_size))

        # Biases start at zero.
        self.b_f = np.zeros((hidden_size, 1))
        self.b_i = np.zeros((hidden_size, 1))
        self.b_o = np.zeros((hidden_size, 1))
        self.b_c = np.zeros((hidden_size, 1))
        self.b_y = np.zeros((output_size, 1))

        # Per-step caches populated in forward and consumed in backward.
        self.hidden_states = []
        self.cell_states = []
        self.forget_gates = []
        self.input_gates = []
        self.output_gates = []
        self.candidates = []
        self.h_prev = None
        self.c_prev = None

    def _sigmoid(self, x):
        """Logistic sigmoid."""
        return 1 / (1 + np.exp(-x))

    def _tanh(self, x):
        """Hyperbolic tangent."""
        return np.tanh(x)

    def forward(self, input_data, h_prev=None, c_prev=None):
        """Forward pass.

        Args:
            input_data: array of shape (batch, seq, input_size).
            h_prev: optional initial hidden state (hidden_size, batch).
            c_prev: optional initial cell state (hidden_size, batch).

        Returns:
            (output, h_last, c_last): output is (batch, seq, output_size)
            when return_sequences else (batch, output_size).
        """
        self.input = input_data
        batch_size, seq_length, _ = input_data.shape

        # Initialize hidden and cell state if not supplied by the caller.
        if h_prev is None:
            h_prev = np.zeros((self.hidden_size, batch_size))
        if c_prev is None:
            c_prev = np.zeros((self.hidden_size, batch_size))

        self.h_prev = h_prev
        self.c_prev = c_prev
        # Index 0 holds the initial state; index t + 1 holds the state after step t.
        self.hidden_states = [h_prev]
        self.cell_states = [c_prev]
        # Reset per-step gate caches (fix: previously these were never cleared,
        # so they grew across forward calls and backward indexed stale entries
        # from earlier batches after the first pass).
        self.forget_gates = []
        self.input_gates = []
        self.output_gates = []
        self.candidates = []

        # Pre-allocate the output buffer.
        if self.return_sequences:
            self.output = np.zeros((batch_size, seq_length, self.output_size))
        else:
            self.output = np.zeros((batch_size, self.output_size))

        # Process the sequence one time step at a time.
        for t in range(seq_length):
            x_t = input_data[:, t, :].T  # (input_size, batch)

            # Gates: forget, input, output.
            f_t = self._sigmoid(np.dot(self.W_xf, x_t) + np.dot(self.W_hf, h_prev) + self.b_f)
            i_t = self._sigmoid(np.dot(self.W_xi, x_t) + np.dot(self.W_hi, h_prev) + self.b_i)
            o_t = self._sigmoid(np.dot(self.W_xo, x_t) + np.dot(self.W_ho, h_prev) + self.b_o)

            # Candidate cell state.
            c_tilde_t = self._tanh(np.dot(self.W_xc, x_t) + np.dot(self.W_hc, h_prev) + self.b_c)

            # Cell state update: keep part of the old cell, write part of the candidate.
            c_t = f_t * c_prev + i_t * c_tilde_t

            # Hidden state: gated view of the cell state.
            h_t = o_t * self._tanh(c_t)

            # Output projection.
            y_t = np.dot(self.W_hy, h_t) + self.b_y

            if self.return_sequences:
                self.output[:, t, :] = y_t.T
            elif t == seq_length - 1:
                # Only keep the output of the final time step.
                self.output[:, :] = y_t.T

            # Cache everything backward needs.
            self.hidden_states.append(h_t)
            self.cell_states.append(c_t)
            self.forget_gates.append(f_t)
            self.input_gates.append(i_t)
            self.output_gates.append(o_t)
            self.candidates.append(c_tilde_t)

            h_prev = h_t
            c_prev = c_t

        return self.output, h_prev, c_prev

    def backward(self, output_gradient, learning_rate):
        """Backpropagation through time; updates parameters via SGD.

        Args:
            output_gradient: gradient of the loss w.r.t. the layer output —
                (batch, seq, output_size) when return_sequences, otherwise
                (batch, output_size).
            learning_rate: SGD step size.

        Returns:
            Gradient w.r.t. the input, shape (batch, seq, input_size).

        Fix vs. the previous version: the input gradient is accumulated per
        time step inside the loop from all four gate paths, instead of
        reusing the last forget-gate delta for every step after the loop.
        """
        batch_size = output_gradient.shape[0]
        seq_length = self.input.shape[1]

        # Accumulators for parameter gradients.
        dW_xf = np.zeros_like(self.W_xf)
        dW_hf = np.zeros_like(self.W_hf)
        dW_xi = np.zeros_like(self.W_xi)
        dW_hi = np.zeros_like(self.W_hi)
        dW_xo = np.zeros_like(self.W_xo)
        dW_ho = np.zeros_like(self.W_ho)
        dW_xc = np.zeros_like(self.W_xc)
        dW_hc = np.zeros_like(self.W_hc)
        dW_hy = np.zeros_like(self.W_hy)

        db_f = np.zeros_like(self.b_f)
        db_i = np.zeros_like(self.b_i)
        db_o = np.zeros_like(self.b_o)
        db_c = np.zeros_like(self.b_c)
        db_y = np.zeros_like(self.b_y)

        # Gradients flowing in from the future time step.
        dh_next = np.zeros((self.hidden_size, batch_size))
        dc_next = np.zeros((self.hidden_size, batch_size))
        input_gradient = np.zeros_like(self.input)

        # Walk the sequence backwards.
        for t in reversed(range(seq_length)):
            if self.return_sequences:
                dy_t = output_gradient[:, t, :].T  # (output_size, batch)
            elif t == seq_length - 1:
                dy_t = output_gradient.T  # only the last step produced output
            else:
                dy_t = np.zeros((self.output_size, batch_size))

            # Output-layer gradients.
            dW_hy += np.dot(dy_t, self.hidden_states[t + 1].T)
            db_y += np.sum(dy_t, axis=1, keepdims=True)

            # Hidden-state gradient: from the output plus from the future step.
            dh_t = np.dot(self.W_hy.T, dy_t) + dh_next

            # Output gate: dL/do = dh * tanh(c_t), then sigmoid derivative.
            do_t = dh_t * self._tanh(self.cell_states[t + 1])
            do_t = do_t * self.output_gates[t] * (1 - self.output_gates[t])

            # Cell-state gradient: from the future plus through h_t = o * tanh(c).
            dc_t = dc_next + dh_t * self.output_gates[t] * (1 - self._tanh(self.cell_states[t + 1]) ** 2)

            # Candidate: dL/dc~ = dc * i, then tanh derivative.
            dc_tilde_t = dc_t * self.input_gates[t]
            dc_tilde_t = dc_tilde_t * (1 - self.candidates[t] ** 2)

            # Input gate: dL/di = dc * c~, then sigmoid derivative.
            di_t = dc_t * self.candidates[t]
            di_t = di_t * self.input_gates[t] * (1 - self.input_gates[t])

            # Forget gate: dL/df = dc * c_{t-1} (cell_states[t]), then sigmoid derivative.
            df_t = dc_t * self.cell_states[t]
            df_t = df_t * self.forget_gates[t] * (1 - self.forget_gates[t])

            # Weight gradients for each gate.
            dW_xf += np.dot(df_t, self.input[:, t, :])
            dW_hf += np.dot(df_t, self.hidden_states[t].T)
            db_f += np.sum(df_t, axis=1, keepdims=True)

            dW_xi += np.dot(di_t, self.input[:, t, :])
            dW_hi += np.dot(di_t, self.hidden_states[t].T)
            db_i += np.sum(di_t, axis=1, keepdims=True)

            dW_xo += np.dot(do_t, self.input[:, t, :])
            dW_ho += np.dot(do_t, self.hidden_states[t].T)
            db_o += np.sum(do_t, axis=1, keepdims=True)

            dW_xc += np.dot(dc_tilde_t, self.input[:, t, :])
            dW_hc += np.dot(dc_tilde_t, self.hidden_states[t].T)
            db_c += np.sum(dc_tilde_t, axis=1, keepdims=True)

            # Gradient w.r.t. this step's input: sum of all four gate paths
            # (fix: computed per time step).
            dx_t = (np.dot(self.W_xf.T, df_t) + np.dot(self.W_xi.T, di_t) +
                    np.dot(self.W_xo.T, do_t) + np.dot(self.W_xc.T, dc_tilde_t))
            input_gradient[:, t, :] = dx_t.T

            # Propagate to the previous time step.
            dh_next = np.dot(self.W_hf.T, df_t) + np.dot(self.W_hi.T, di_t) + \
                      np.dot(self.W_ho.T, do_t) + np.dot(self.W_hc.T, dc_tilde_t)
            dc_next = dc_t * self.forget_gates[t]

        # Average gradients over the batch.
        dW_xf /= batch_size
        dW_hf /= batch_size
        dW_xi /= batch_size
        dW_hi /= batch_size
        dW_xo /= batch_size
        dW_ho /= batch_size
        dW_xc /= batch_size
        dW_hc /= batch_size
        dW_hy /= batch_size

        db_f /= batch_size
        db_i /= batch_size
        db_o /= batch_size
        db_c /= batch_size
        db_y /= batch_size

        # SGD parameter update.
        self.W_xf -= learning_rate * dW_xf
        self.W_hf -= learning_rate * dW_hf
        self.W_xi -= learning_rate * dW_xi
        self.W_hi -= learning_rate * dW_hi
        self.W_xo -= learning_rate * dW_xo
        self.W_ho -= learning_rate * dW_ho
        self.W_xc -= learning_rate * dW_xc
        self.W_hc -= learning_rate * dW_hc
        self.W_hy -= learning_rate * dW_hy

        self.b_f -= learning_rate * db_f
        self.b_i -= learning_rate * db_i
        self.b_o -= learning_rate * db_o
        self.b_c -= learning_rate * db_c
        self.b_y -= learning_rate * db_y

        return input_gradient

    def get_params(self):
        """Return copies of all trainable parameters."""
        return {
            'W_xf': self.W_xf.copy(),
            'W_hf': self.W_hf.copy(),
            'W_xi': self.W_xi.copy(),
            'W_hi': self.W_hi.copy(),
            'W_xo': self.W_xo.copy(),
            'W_ho': self.W_ho.copy(),
            'W_xc': self.W_xc.copy(),
            'W_hc': self.W_hc.copy(),
            'W_hy': self.W_hy.copy(),
            'b_f': self.b_f.copy(),
            'b_i': self.b_i.copy(),
            'b_o': self.b_o.copy(),
            'b_c': self.b_c.copy(),
            'b_y': self.b_y.copy()
        }

    def set_params(self, params):
        """Load trainable parameters from a dict produced by get_params()."""
        self.W_xf = params['W_xf']
        self.W_hf = params['W_hf']
        self.W_xi = params['W_xi']
        self.W_hi = params['W_hi']
        self.W_xo = params['W_xo']
        self.W_ho = params['W_ho']
        self.W_xc = params['W_xc']
        self.W_hc = params['W_hc']
        self.W_hy = params['W_hy']
        self.b_f = params['b_f']
        self.b_i = params['b_i']
        self.b_o = params['b_o']
        self.b_c = params['b_c']
        self.b_y = params['b_y']


class GRU(Layer):
    """GRU layer with update and reset gates.

    Processes input of shape (batch, seq, input_size). Returns the output of
    every time step when return_sequences=True, otherwise only the output of
    the final step.
    """

    def __init__(self, input_size, hidden_size, output_size, return_sequences=True):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        self.output_size = output_size
        self.return_sequences = return_sequences  # whether to return outputs for all time steps

        # Xavier/Glorot-style weight initialization (update z, reset r, candidate h).
        self.W_xz = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_hz = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_xr = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_hr = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_xh = np.random.randn(hidden_size, input_size) * np.sqrt(2 / (input_size + hidden_size))
        self.W_hh = np.random.randn(hidden_size, hidden_size) * np.sqrt(2 / (hidden_size + hidden_size))
        self.W_hy = np.random.randn(output_size, hidden_size) * np.sqrt(2 / (hidden_size + output_size))

        # Biases start at zero.
        self.b_z = np.zeros((hidden_size, 1))
        self.b_r = np.zeros((hidden_size, 1))
        self.b_h = np.zeros((hidden_size, 1))
        self.b_y = np.zeros((output_size, 1))

        # Per-step caches populated in forward and consumed in backward.
        self.hidden_states = []
        self.update_gates = []
        self.reset_gates = []
        self.candidates = []
        self.h_prev = None

    def _sigmoid(self, x):
        """Logistic sigmoid."""
        return 1 / (1 + np.exp(-x))

    def _tanh(self, x):
        """Hyperbolic tangent."""
        return np.tanh(x)

    def forward(self, input_data, h_prev=None):
        """Forward pass.

        Args:
            input_data: array of shape (batch, seq, input_size).
            h_prev: optional initial hidden state (hidden_size, batch).

        Returns:
            (output, h_last): output is (batch, seq, output_size) when
            return_sequences else (batch, output_size).
        """
        self.input = input_data
        batch_size, seq_length, _ = input_data.shape

        # Initialize the hidden state if the caller did not supply one.
        if h_prev is None:
            h_prev = np.zeros((self.hidden_size, batch_size))

        self.h_prev = h_prev
        # hidden_states[0] is the initial state; hidden_states[t + 1] is h_t.
        self.hidden_states = [h_prev]
        # Reset per-step gate caches (fix: previously these were never cleared,
        # so they grew across forward calls and backward indexed stale entries
        # from earlier batches after the first pass).
        self.update_gates = []
        self.reset_gates = []
        self.candidates = []

        # Pre-allocate the output buffer.
        if self.return_sequences:
            self.output = np.zeros((batch_size, seq_length, self.output_size))
        else:
            self.output = np.zeros((batch_size, self.output_size))

        # Process the sequence one time step at a time.
        for t in range(seq_length):
            x_t = input_data[:, t, :].T  # (input_size, batch)

            # Update gate z_t and reset gate r_t.
            z_t = self._sigmoid(np.dot(self.W_xz, x_t) + np.dot(self.W_hz, h_prev) + self.b_z)
            r_t = self._sigmoid(np.dot(self.W_xr, x_t) + np.dot(self.W_hr, h_prev) + self.b_r)

            # Candidate hidden state (reset gate masks the previous state).
            h_tilde_t = self._tanh(np.dot(self.W_xh, x_t) + np.dot(self.W_hh, r_t * h_prev) + self.b_h)

            # Hidden state: interpolate between previous state and candidate.
            h_t = (1 - z_t) * h_prev + z_t * h_tilde_t

            # Output projection.
            y_t = np.dot(self.W_hy, h_t) + self.b_y

            if self.return_sequences:
                self.output[:, t, :] = y_t.T
            elif t == seq_length - 1:
                # Only keep the output of the final time step.
                self.output[:, :] = y_t.T

            # Cache everything backward needs.
            self.hidden_states.append(h_t)
            self.update_gates.append(z_t)
            self.reset_gates.append(r_t)
            self.candidates.append(h_tilde_t)

            h_prev = h_t

        return self.output, h_prev

    def backward(self, output_gradient, learning_rate):
        """Backpropagation through time; updates parameters via SGD.

        Args:
            output_gradient: gradient of the loss w.r.t. the layer output —
                (batch, seq, output_size) when return_sequences, otherwise
                (batch, output_size).
            learning_rate: SGD step size.

        Returns:
            Gradient w.r.t. the input, shape (batch, seq, input_size).

        Fix vs. the previous version: the input gradient is accumulated per
        time step inside the loop from all three gate paths, instead of
        reusing the last update-gate delta for every step after the loop.
        """
        batch_size = output_gradient.shape[0]
        seq_length = self.input.shape[1]

        # Accumulators for parameter gradients.
        dW_xz = np.zeros_like(self.W_xz)
        dW_hz = np.zeros_like(self.W_hz)
        dW_xr = np.zeros_like(self.W_xr)
        dW_hr = np.zeros_like(self.W_hr)
        dW_xh = np.zeros_like(self.W_xh)
        dW_hh = np.zeros_like(self.W_hh)
        dW_hy = np.zeros_like(self.W_hy)

        db_z = np.zeros_like(self.b_z)
        db_r = np.zeros_like(self.b_r)
        db_h = np.zeros_like(self.b_h)
        db_y = np.zeros_like(self.b_y)

        # Gradient flowing into the hidden state from the future.
        dh_next = np.zeros((self.hidden_size, batch_size))
        input_gradient = np.zeros_like(self.input)

        # Walk the sequence backwards.
        for t in reversed(range(seq_length)):
            if self.return_sequences:
                dy_t = output_gradient[:, t, :].T  # (output_size, batch)
            elif t == seq_length - 1:
                dy_t = output_gradient.T  # only the last step produced output
            else:
                dy_t = np.zeros((self.output_size, batch_size))

            # Output-layer gradients.
            dW_hy += np.dot(dy_t, self.hidden_states[t + 1].T)
            db_y += np.sum(dy_t, axis=1, keepdims=True)

            # Hidden-state gradient: from the output plus from the future step.
            dh_t = np.dot(self.W_hy.T, dy_t) + dh_next

            # Update gate: h_t = (1-z)h_{t-1} + z*h~, so dL/dz = dh * (h~ - h_{t-1}).
            dz_t = dh_t * (self.candidates[t] - self.hidden_states[t])
            dz_t = dz_t * self.update_gates[t] * (1 - self.update_gates[t])

            # Candidate: dL/dh~ = dh * z, then tanh derivative.
            dh_tilde_t = dh_t * self.update_gates[t]
            dh_tilde_t = dh_tilde_t * (1 - self.candidates[t] ** 2)

            # Reset gate: candidate saw r * h_{t-1}, so dL/dr = (W_hh^T dh~) * h_{t-1}.
            dr_t = np.dot(self.W_hh.T, dh_tilde_t) * self.hidden_states[t]
            dr_t = dr_t * self.reset_gates[t] * (1 - self.reset_gates[t])

            # Weight gradients for each gate.
            dW_xz += np.dot(dz_t, self.input[:, t, :])
            dW_hz += np.dot(dz_t, self.hidden_states[t].T)
            db_z += np.sum(dz_t, axis=1, keepdims=True)

            dW_xr += np.dot(dr_t, self.input[:, t, :])
            dW_hr += np.dot(dr_t, self.hidden_states[t].T)
            db_r += np.sum(dr_t, axis=1, keepdims=True)

            dW_xh += np.dot(dh_tilde_t, self.input[:, t, :])
            dW_hh += np.dot(dh_tilde_t, (self.reset_gates[t] * self.hidden_states[t]).T)
            db_h += np.sum(dh_tilde_t, axis=1, keepdims=True)

            # Gradient w.r.t. this step's input: sum of all three gate paths
            # (fix: computed per time step).
            dx_t = (np.dot(self.W_xz.T, dz_t) + np.dot(self.W_xr.T, dr_t) +
                    np.dot(self.W_xh.T, dh_tilde_t))
            input_gradient[:, t, :] = dx_t.T

            # Propagate to the previous time step: direct (1-z) path plus the
            # gate and candidate paths.
            dh_next = np.dot(self.W_hz.T, dz_t) + np.dot(self.W_hr.T, dr_t) + \
                      dh_t * (1 - self.update_gates[t]) + \
                      np.dot(self.W_hh.T, dh_tilde_t) * self.reset_gates[t]

        # Average gradients over the batch.
        dW_xz /= batch_size
        dW_hz /= batch_size
        dW_xr /= batch_size
        dW_hr /= batch_size
        dW_xh /= batch_size
        dW_hh /= batch_size
        dW_hy /= batch_size

        db_z /= batch_size
        db_r /= batch_size
        db_h /= batch_size
        db_y /= batch_size

        # SGD parameter update.
        self.W_xz -= learning_rate * dW_xz
        self.W_hz -= learning_rate * dW_hz
        self.W_xr -= learning_rate * dW_xr
        self.W_hr -= learning_rate * dW_hr
        self.W_xh -= learning_rate * dW_xh
        self.W_hh -= learning_rate * dW_hh
        self.W_hy -= learning_rate * dW_hy

        self.b_z -= learning_rate * db_z
        self.b_r -= learning_rate * db_r
        self.b_h -= learning_rate * db_h
        self.b_y -= learning_rate * db_y

        return input_gradient

    def get_params(self):
        """Return copies of all trainable parameters."""
        return {
            'W_xz': self.W_xz.copy(),
            'W_hz': self.W_hz.copy(),
            'W_xr': self.W_xr.copy(),
            'W_hr': self.W_hr.copy(),
            'W_xh': self.W_xh.copy(),
            'W_hh': self.W_hh.copy(),
            'W_hy': self.W_hy.copy(),
            'b_z': self.b_z.copy(),
            'b_r': self.b_r.copy(),
            'b_h': self.b_h.copy(),
            'b_y': self.b_y.copy()
        }

    def set_params(self, params):
        """Load trainable parameters from a dict produced by get_params()."""
        self.W_xz = params['W_xz']
        self.W_hz = params['W_hz']
        self.W_xr = params['W_xr']
        self.W_hr = params['W_hr']
        self.W_xh = params['W_xh']
        self.W_hh = params['W_hh']
        self.W_hy = params['W_hy']
        self.b_z = params['b_z']
        self.b_r = params['b_r']
        self.b_h = params['b_h']
        self.b_y = params['b_y']
