import numpy as np
import matplotlib.pyplot as plt


# Synthetic regression dataset: a noisy cubic curve
def generate_data(num_samples=100):
    """Generate a reproducible noisy cubic dataset y = 0.5x^3 + 0.8x + noise.

    Returns (X, y), each of shape (num_samples, 1); X is uniform in [-2, 2).
    """
    np.random.seed(42)  # fixed seed so every run produces the same data
    X = 4.0 * np.random.rand(num_samples, 1) - 2.0
    noise = 0.1 * np.random.randn(num_samples, 1)
    y = 0.5 * X ** 3 + 0.8 * X + noise
    return X, y


# Activation functions and their derivatives
def relu(x):
    """Rectified linear unit: elementwise max(x, 0)."""
    return np.clip(x, 0, None)


def relu_derivative(x):
    """Subgradient of ReLU: 1.0 where x > 0, else 0.0."""
    return np.where(x > 0, 1.0, 0.0)


def linear(x):
    """Identity activation (used at the output layer for regression)."""
    return x


def linear_derivative(x):
    """Derivative of the identity activation: an array of ones shaped like x."""
    return np.full_like(x, 1.0)


# Weight initialization
def initialize_weights(layers):
    """He-initialize the parameters of a fully connected network.

    layers: list of layer widths, e.g. [1, 64, 32, 1].
    Returns (weights, biases): weights[i] has shape (layers[i], layers[i+1])
    drawn from N(0, 2/layers[i]); biases[i] is zeros of shape (1, layers[i+1]).
    """
    weights = []
    biases = []
    for fan_in, fan_out in zip(layers[:-1], layers[1:]):
        # He initialization (std = sqrt(2 / fan_in)) — suited to ReLU layers
        scale = np.sqrt(2.0 / fan_in)
        weights.append(np.random.randn(fan_in, fan_out) * scale)
        biases.append(np.zeros((1, fan_out)))
    return weights, biases


# Forward pass
def forward_propagation(X, weights, biases):
    """Run a forward pass: ReLU hidden layers, linear output layer.

    Returns (activations, pre_activations) where activations[0] is X,
    activations[-1] is the network output, and pre_activations[i] holds
    the affine input z of layer i (needed by backpropagation).
    """
    activations = [X]
    pre_activations = []

    # Hidden layers: affine transform followed by ReLU
    out = X
    for w, b in zip(weights[:-1], biases[:-1]):
        z = out @ w + b
        out = relu(z)
        pre_activations.append(z)
        activations.append(out)

    # Output layer: affine transform with an identity activation
    z = out @ weights[-1] + biases[-1]
    out = linear(z)
    pre_activations.append(z)
    activations.append(out)

    return activations, pre_activations


# Mean squared error
def mse_loss(y_true, y_pred):
    """Return the mean squared error between targets and predictions."""
    diff = y_true - y_pred
    return np.mean(np.square(diff))


# Backward pass
def backward_propagation(X, y, activations, pre_activations, weights):
    """Backpropagate MSE gradients through the network.

    Returns (weight_gradients, bias_gradients) ordered front-to-back to
    match `weights`. The 1/m factor from the mean is folded into the
    output-layer delta, so per-layer gradients need no extra scaling.
    """
    num_samples = X.shape[0]
    num_layers = len(weights)

    # Output layer: d(MSE)/d(pred) = 2*(pred - y)/m, identity activation
    delta = (2.0 * (activations[-1] - y) / num_samples) * linear_derivative(pre_activations[-1])
    deltas = [delta]

    # Hidden layers, walking backwards through the network
    for layer in reversed(range(num_layers - 1)):
        delta = (delta @ weights[layer + 1].T) * relu_derivative(pre_activations[layer])
        deltas.append(delta)

    deltas.reverse()  # now front-to-back, aligned with `weights`

    # Parameter gradients: dW = a_prev^T @ delta, dB = column sums of delta
    weight_gradients = [activations[i].T @ deltas[i] for i in range(num_layers)]
    bias_gradients = [np.sum(deltas[i], axis=0, keepdims=True) for i in range(num_layers)]

    return weight_gradients, bias_gradients


# RMSprop optimizer: scales each step by a running RMS of past gradients
class RMSprop:
    """Functional-style RMSprop; `update` returns new parameter lists.

    Per parameter:  cache <- rho*cache + (1-rho)*grad^2
                    param <- param - lr * grad / (sqrt(cache) + epsilon)
    """

    def __init__(self, learning_rate=0.001, rho=0.9, epsilon=1e-8):
        self.lr = learning_rate
        self.rho = rho          # decay rate of the squared-gradient average
        self.epsilon = epsilon  # numerical safety term in the denominator
        self.cache_w = None     # running squared weight grads (lazily created)
        self.cache_b = None     # running squared bias grads (lazily created)

    def _step(self, param, grad, cache):
        """Update one squared-gradient cache in place; return the new param."""
        cache *= self.rho
        cache += (1.0 - self.rho) * grad ** 2
        return param - self.lr * grad / (np.sqrt(cache) + self.epsilon)

    def update(self, weights, biases, weight_grads, bias_grads):
        """Apply one RMSprop step; returns (new_weights, new_biases)."""
        # Create the caches on the first call, matching parameter shapes
        if self.cache_w is None:
            self.cache_w = [np.zeros_like(w) for w in weights]
            self.cache_b = [np.zeros_like(b) for b in biases]

        new_weights = [
            self._step(w, g, self.cache_w[i])
            for i, (w, g) in enumerate(zip(weights, weight_grads))
        ]
        new_biases = [
            self._step(b, g, self.cache_b[i])
            for i, (b, g) in enumerate(zip(biases, bias_grads))
        ]
        return new_weights, new_biases


# Training loop
def train_network(X, y, layers, epochs=1000, learning_rate=0.01, rho=0.9):
    """Train a fully connected network on (X, y) with RMSprop and MSE loss.

    layers: list of layer widths (input first, output last).
    Returns (weights, biases, loss_history), where loss_history records
    the MSE at every epoch.
    """
    weights, biases = initialize_weights(layers)
    optimizer = RMSprop(learning_rate=learning_rate, rho=rho)
    loss_history = []

    for epoch in range(epochs):
        # Forward pass and loss bookkeeping
        activations, pre_activations = forward_propagation(X, weights, biases)
        loss = mse_loss(y, activations[-1])
        loss_history.append(loss)

        # Progress report every 100 epochs
        if epoch % 100 == 0:
            print(f"Epoch {epoch}, Loss: {loss:.6f}")

        # Backward pass, then parameter update via the optimizer
        weight_grads, bias_grads = backward_propagation(
            X, y, activations, pre_activations, weights
        )
        weights, biases = optimizer.update(weights, biases, weight_grads, bias_grads)

    return weights, biases, loss_history


# Visualization
def plot_results(X, y, weights, biases, loss_history):
    """Show the fitted curve against the data plus the training-loss curve."""
    plt.figure(figsize=(15, 5))

    # Left panel: data scatter with the model's prediction curve on top
    plt.subplot(1, 2, 1)
    plt.scatter(X, y, label='True data', alpha=0.6)

    # Evaluate the trained network on a dense grid over the input range
    grid = np.linspace(-2, 2, 100).reshape(-1, 1)
    predictions = forward_propagation(grid, weights, biases)[0][-1]

    plt.plot(grid, predictions, 'r-', linewidth=2, label='Prediction')
    plt.title('Data and Model Prediction')
    plt.xlabel('X')
    plt.ylabel('y')
    plt.legend()
    plt.grid(True)

    # Right panel: loss history on a log scale to show late-stage progress
    plt.subplot(1, 2, 2)
    plt.plot(loss_history)
    plt.yscale('log')
    plt.title('Training Loss (Log Scale)')
    plt.xlabel('Epoch')
    plt.ylabel('Loss (log scale)')
    plt.grid(True)

    plt.tight_layout()
    plt.show()


# Script entry point
if __name__ == "__main__":
    # Build the (fixed-seed) dataset
    features, targets = generate_data(200)

    # Architecture: input(1) -> hidden(64) -> hidden(32) -> output(1)
    architecture = [1, 64, 32, 1]

    print("开始训练...")
    trained_w, trained_b, history = train_network(
        features,
        targets,
        architecture,
        epochs=2000,
        learning_rate=0.001,
        rho=0.9,
    )

    print("训练完成!")
    plot_results(features, targets, trained_w, trained_b, history)