import numpy as np
from keras.datasets import mnist
from keras.utils import to_categorical

# ========== Activation functions and their derivatives ==========


def relu(x):
    """Rectified linear unit: clamp every element below zero up to zero."""
    return np.clip(x, a_min=0, a_max=None)


def relu_derivative(x):
    """Subgradient of ReLU: 1.0 where the input was positive, 0.0 elsewhere."""
    return np.where(x > 0, 1.0, 0.0)


def softmax(x):
    """Row-wise softmax: turn each row of logits into a probability distribution."""
    # Subtract the per-row max first so np.exp cannot overflow.
    shifted = x - np.max(x, axis=1, keepdims=True)
    exps = np.exp(shifted)
    return exps / np.sum(exps, axis=1, keepdims=True)


# ========== Loss function: cross-entropy ==========


def cross_entropy(y_pred, y_true):
    """Mean cross-entropy between predicted probabilities and one-hot targets."""
    batch_size = y_pred.shape[0]
    # The 1e-8 offset guards against log(0) on hard-zero predictions.
    log_likelihood = y_true * np.log(y_pred + 1e-8)
    return -np.sum(log_likelihood) / batch_size


def cross_entropy_derivative(y_pred, y_true):
    """Gradient of mean cross-entropy w.r.t. softmax outputs: (p - y) / batch."""
    batch_size = y_pred.shape[0]
    return (y_pred - y_true) / batch_size


# ========== Parameter initialization ==========


def initialize_parameters(layer_sizes=(784, 512, 128, 10), seed=42):
    """Initialize weights and biases for a fully-connected ReLU network.

    Args:
        layer_sizes: Layer widths from input to output. The default
            (784, 512, 128, 10) reproduces the original 784-512-128-10 net.
        seed: RNG seed, so initialization stays reproducible (the original
            hard-coded seed 42).

    Returns:
        Dict mapping "W1".."Wk" / "b1".."bk" (k = len(layer_sizes) - 1) to
        arrays; Wi has shape (fan_in, fan_out), bi has shape (1, fan_out).

    Weights use He initialization (std = sqrt(2 / fan_in)), the standard
    choice for ReLU layers: the original fixed 0.01 scale shrinks activations
    layer by layer and slows early training in deep ReLU networks.
    """
    np.random.seed(seed)
    params = {}
    for i, (fan_in, fan_out) in enumerate(zip(layer_sizes[:-1], layer_sizes[1:]), start=1):
        params[f"W{i}"] = np.random.randn(fan_in, fan_out) * np.sqrt(2.0 / fan_in)
        params[f"b{i}"] = np.zeros((1, fan_out))
    return params


# ========== Forward propagation ==========


def forward_pass(X, params):
    """Run one forward sweep through the three-layer network.

    Returns the softmax output and a cache of every intermediate value
    (inputs, pre-activations, activations) that the backward pass needs.
    """
    W1, b1 = params["W1"], params["b1"]
    W2, b2 = params["W2"], params["b2"]
    W3, b3 = params["W3"], params["b3"]

    Z1 = X @ W1 + b1        # hidden layer 1, pre-activation
    A1 = relu(Z1)
    Z2 = A1 @ W2 + b2       # hidden layer 2, pre-activation
    A2 = relu(Z2)
    Z3 = A2 @ W3 + b3       # output layer, pre-activation
    A3 = softmax(Z3)        # class probabilities

    return A3, (X, Z1, A1, Z2, A2, Z3, A3)


# ========== Backward propagation ==========


def backward_pass(cache, params, y_true):
    """Backpropagate the loss and return gradients for every parameter.

    `cache` is the tuple produced by forward_pass; the returned dict uses
    the parameter names prefixed with 'd' ("dW1", "db1", ...).
    """
    X, Z1, A1, Z2, A2, Z3, A3 = cache
    grads = {}

    # Output layer: softmax + cross-entropy collapse to (A3 - y) / batch.
    dZ3 = cross_entropy_derivative(A3, y_true)
    grads["dW3"] = A2.T @ dZ3
    grads["db3"] = dZ3.sum(axis=0, keepdims=True)

    # Hidden layer 2: push the error through W3, then gate by the ReLU mask.
    dZ2 = (dZ3 @ params["W3"].T) * relu_derivative(Z2)
    grads["dW2"] = A1.T @ dZ2
    grads["db2"] = dZ2.sum(axis=0, keepdims=True)

    # Hidden layer 1: same pattern one level closer to the input.
    dZ1 = (dZ2 @ params["W2"].T) * relu_derivative(Z1)
    grads["dW1"] = X.T @ dZ1
    grads["db1"] = dZ1.sum(axis=0, keepdims=True)

    return grads


# ========== Parameter update ==========


def update_parameters(params, grads, lr=0.01):
    """Apply one in-place gradient-descent step: param -= lr * gradient."""
    for name, value in params.items():
        # Gradient keys mirror parameter names with a leading 'd'.
        value -= lr * grads["d" + name]
    return params


# ========== Main training loop ==========


def train(X_train, y_train, X_test, y_test, epochs=10, batch_size=64, lr=0.1):
    """Mini-batch gradient descent with a test-set evaluation after each epoch."""
    params = initialize_parameters()
    n_samples = X_train.shape[0]

    for epoch in range(epochs):
        # Reshuffle the training set every epoch.
        order = np.random.permutation(n_samples)
        X_train, y_train = X_train[order], y_train[order]

        for start in range(0, n_samples, batch_size):
            xb = X_train[start : start + batch_size]
            yb = y_train[start : start + batch_size]

            # Forward pass and loss on this mini-batch.
            probs, cache = forward_pass(xb, params)
            loss = cross_entropy(probs, yb)

            # Backward pass, then gradient-descent step.
            grads = backward_pass(cache, params, yb)
            params = update_parameters(params, grads, lr)

        # Accuracy on the held-out test set (loss shown is the last batch's).
        test_pred, _ = forward_pass(X_test, params)
        acc = np.mean(np.argmax(test_pred, axis=1) == np.argmax(y_test, axis=1))
        print(f"Epoch {epoch+1}, Loss: {loss:.4f}, Test Accuracy: {acc:.4f}")


# ========== Data loading and script entry point ==========


def _load_mnist():
    """Load MNIST; flatten images to 784-vectors in [0, 1], one-hot the labels."""
    (X_train, y_train), (X_test, y_test) = mnist.load_data()  # downloads on first use
    X_train = X_train.reshape(-1, 784).astype(np.float32) / 255
    X_test = X_test.reshape(-1, 784).astype(np.float32) / 255
    y_train = to_categorical(y_train, 10)
    y_test = to_categorical(y_test, 10)
    return X_train, y_train, X_test, y_test


# Guard the heavy work (dataset download + full training run) so that
# importing this module for its functions no longer triggers it.
if __name__ == "__main__":
    X_train, y_train, X_test, y_test = _load_mnist()
    train(X_train, y_train, X_test, y_test, epochs=10, batch_size=64, lr=0.1)
