{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "aUP269oZ8734",
        "outputId": "8279bb02-c4ce-4969-88f7-b51c62ce349c"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "===== 迭代前后的函数关系变化 =====\n",
            "1. 函数结构（不变）：\n",
            "   输出y_hat = sigmoid(W3·sigmoid(W2·sigmoid(W1·x + b1) + b2) + b3)\n",
            "   （多层sigmoid嵌套结构不变，仅参数W/b更新）\n",
            "\n",
            "2. 关键参数变化（部分）：\n",
            "   W3: 从 [[1.1 1.2]] → [[1.0995 1.1995]]\n",
            "   b3: 从 [[1.3]] → [[1.2994]]\n",
            "   W2[0][0]: 从 0.5 → 0.4999\n",
            "   b1[0][0]: 从 0.3 → 0.3\n",
            "\n",
            "3. 各层计算结果变化（输入x=0.5）：\n",
            "   隐藏层1预激活z1: 从 [[0.35]\n",
            " [0.5 ]] → [[0.35]\n",
            " [0.5 ]]\n",
            "   隐藏层1输出a1: 从 [[0.5866]\n",
            " [0.6225]] → [[0.5866]\n",
            " [0.6225]]\n",
            "   隐藏层2预激活z2: 从 [[1.5668]\n",
            " [1.9086]] → [[1.5666]\n",
            " [1.9085]]\n",
            "   隐藏层2输出a2: 从 [[0.8273]\n",
            " [0.8709]] → [[0.8273]\n",
            " [0.8708]]\n",
            "   输出层预激活z3: 从 [[3.2551]] → [[3.2536]]\n",
            "   预测值y_hat: 从 [[0.9629]] → [[0.9628]]（更接近真实标签0.8）\n",
            "   损失L: 从 [[0.013261]] → [[0.013252]]（损失下降）\n",
            "\n",
            "4. 函数关系变化本质：\n",
            "   输入x到输出y_hat的非线性映射被微调，参数向“减少损失”的方向更新，\n",
            "   对于相同输入x，输出更接近真实标签y，映射关系更贴合数据规律。\n"
          ]
        }
      ],
      "source": [
        "import numpy as np\n",
        "\n",
        "# 1. Sigmoid activation function and its derivative\n",
        "def sigmoid(x):\n",
        "    \"\"\"Logistic sigmoid 1 / (1 + e^(-x)); applies element-wise to numpy arrays.\"\"\"\n",
        "    return 1 / (1 + np.exp(-x))\n",
        "\n",
        "def sigmoid_derivative(x):\n",
        "    \"\"\"Derivative of the sigmoid w.r.t. its pre-activation input: s * (1 - s).\"\"\"\n",
        "    s = sigmoid(x)\n",
        "    return s * (1 - s)\n",
        "\n",
        "\n",
        "# 2. Initialize parameters (same values as the worked example in the markdown)\n",
        "W1 = np.array([[0.1], [0.2]])  # hidden layer 1 weights (2x1)\n",
        "b1 = np.array([[0.3], [0.4]])  # hidden layer 1 biases (2x1)\n",
        "W2 = np.array([[0.5, 0.6], [0.7, 0.8]])  # hidden layer 2 weights (2x2)\n",
        "b2 = np.array([[0.9], [1.0]])  # hidden layer 2 biases (2x1)\n",
        "W3 = np.array([[1.1, 1.2]])  # output layer weights (1x2)\n",
        "b3 = np.array([[1.3]])  # output layer bias (1x1)\n",
        "\n",
        "x = np.array([[0.5]])  # input sample\n",
        "y = np.array([[0.8]])  # ground-truth label\n",
        "learning_rate = 0.1\n",
        "\n",
        "\n",
        "# 3. Forward propagation (returns every intermediate for before/after comparison)\n",
        "def forward_propagation(x, W1, b1, W2, b2, W3, b3):\n",
        "    \"\"\"Run one forward pass through the 3-layer sigmoid network.\n",
        "\n",
        "    Returns (z1, a1, z2, a2, z3, y_hat, loss): per-layer pre-activations z,\n",
        "    activations a, the prediction y_hat, and the loss 0.5 * (y_hat - y)^2.\n",
        "    Note: the loss term reads the module-level `y` (it is not a parameter).\n",
        "    \"\"\"\n",
        "    z1 = np.dot(W1, x) + b1\n",
        "    a1 = sigmoid(z1)\n",
        "    z2 = np.dot(W2, a1) + b2\n",
        "    a2 = sigmoid(z2)\n",
        "    z3 = np.dot(W3, a2) + b3\n",
        "    y_hat = sigmoid(z3)\n",
        "    loss = 0.5 * np.square(y_hat - y)\n",
        "    return z1, a1, z2, a2, z3, y_hat, loss\n",
        "\n",
        "\n",
        "# 4. Backpropagation\n",
        "def backward_propagation(x, y, z1, a1, z2, a2, z3, y_hat, W2, W3):\n",
        "    \"\"\"Compute gradients of the loss w.r.t. all weights and biases.\n",
        "\n",
        "    Chain rule, layer by layer: delta_k = dL/dz_k, then\n",
        "    dW_k = delta_k . a_(k-1)^T and db_k = delta_k.\n",
        "    Returns (dW1, db1, dW2, db2, dW3, db3).\n",
        "    \"\"\"\n",
        "    # Output layer: dL/dz3 = (y_hat - y) * sigmoid'(z3)\n",
        "    delta3 = (y_hat - y) * sigmoid_derivative(z3)\n",
        "    dW3 = np.dot(delta3, a2.T)\n",
        "    db3 = delta3\n",
        "\n",
        "    # Hidden layer 2: propagate delta3 back through W3\n",
        "    delta2 = np.dot(W3.T, delta3) * sigmoid_derivative(z2)\n",
        "    dW2 = np.dot(delta2, a1.T)\n",
        "    db2 = delta2\n",
        "\n",
        "    # Hidden layer 1: propagate delta2 back through W2\n",
        "    delta1 = np.dot(W2.T, delta2) * sigmoid_derivative(z1)\n",
        "    dW1 = np.dot(delta1, x.T)\n",
        "    db1 = delta1\n",
        "\n",
        "    return dW1, db1, dW2, db2, dW3, db3\n",
        "\n",
        "\n",
        "# 5. Parameter update\n",
        "def update_parameters(W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3, lr):\n",
        "    \"\"\"One vanilla gradient-descent step: param_new = param - lr * grad.\n",
        "\n",
        "    Returns the updated (W1, b1, W2, b2, W3, b3) as new arrays; inputs are\n",
        "    not mutated.\n",
        "    \"\"\"\n",
        "    return (\n",
        "        W1 - lr*dW1, b1 - lr*db1,\n",
        "        W2 - lr*dW2, b2 - lr*db2,\n",
        "        W3 - lr*dW3, b3 - lr*db3\n",
        "    )\n",
        "\n",
        "\n",
        "# 6. Main: run one gradient-descent iteration and compare the learned mapping\n",
        "if __name__ == \"__main__\":\n",
        "    # Forward pass BEFORE the update (keep intermediates for comparison)\n",
        "    z1_old, a1_old, z2_old, a2_old, z3_old, y_hat_old, loss_old = forward_propagation(\n",
        "        x, W1, b1, W2, b2, W3, b3\n",
        "    )\n",
        "\n",
        "    # Backpropagation and parameter update\n",
        "    dW1, db1, dW2, db2, dW3, db3 = backward_propagation(\n",
        "        x, y, z1_old, a1_old, z2_old, a2_old, z3_old, y_hat_old, W2, W3\n",
        "    )\n",
        "    W1_new, b1_new, W2_new, b2_new, W3_new, b3_new = update_parameters(\n",
        "        W1, b1, W2, b2, W3, b3, dW1, db1, dW2, db2, dW3, db3, learning_rate\n",
        "    )\n",
        "\n",
        "    # Forward pass AFTER the update (fresh intermediates)\n",
        "    z1_new, a1_new, z2_new, a2_new, z3_new, y_hat_new, loss_new = forward_propagation(\n",
        "        x, W1_new, b1_new, W2_new, b2_new, W3_new, b3_new\n",
        "    )\n",
        "\n",
        "    # ==============================================\n",
        "    # Print the before/after comparison of the mapping\n",
        "    # ==============================================\n",
        "    print(\"===== 迭代前后的函数关系变化 =====\")\n",
        "    print(\"1. 函数结构（不变）：\")\n",
        "    print(\"   输出y_hat = sigmoid(W3·sigmoid(W2·sigmoid(W1·x + b1) + b2) + b3)\")\n",
        "    print(\"   （多层sigmoid嵌套结构不变，仅参数W/b更新）\\n\")\n",
        "\n",
        "    print(\"2. 关键参数变化（部分）：\")\n",
        "    print(f\"   W3: 从 {W3.round(4)} → {W3_new.round(4)}\")\n",
        "    print(f\"   b3: 从 {b3.round(4)} → {b3_new.round(4)}\")\n",
        "    print(f\"   W2[0][0]: 从 {W2[0][0].round(4)} → {W2_new[0][0].round(4)}\")\n",
        "    print(f\"   b1[0][0]: 从 {b1[0][0].round(4)} → {b1_new[0][0].round(4)}\\n\")\n",
        "\n",
        "    print(\"3. 各层计算结果变化（输入x=0.5）：\")\n",
        "    print(f\"   隐藏层1预激活z1: 从 {z1_old.round(4)} → {z1_new.round(4)}\")\n",
        "    print(f\"   隐藏层1输出a1: 从 {a1_old.round(4)} → {a1_new.round(4)}\")\n",
        "    print(f\"   隐藏层2预激活z2: 从 {z2_old.round(4)} → {z2_new.round(4)}\")\n",
        "    print(f\"   隐藏层2输出a2: 从 {a2_old.round(4)} → {a2_new.round(4)}\")\n",
        "    print(f\"   输出层预激活z3: 从 {z3_old.round(4)} → {z3_new.round(4)}\")\n",
        "    print(f\"   预测值y_hat: 从 {y_hat_old.round(4)} → {y_hat_new.round(4)}（更接近真实标签0.8）\")\n",
        "    print(f\"   损失L: 从 {loss_old.round(6)} → {loss_new.round(6)}（损失下降）\\n\")\n",
        "\n",
        "    print(\"4. 函数关系变化本质：\")\n",
        "    print(\"   输入x到输出y_hat的非线性映射被微调，参数向“减少损失”的方向更新，\")\n",
        "    print(\"   对于相同输入x，输出更接近真实标签y，映射关系更贴合数据规律。\")"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "下面通过一个具体例子说明神经网络的节点个数、3层层次关系的计算过程，以及迭代一次后的函数关系变化。\n",
        "\n",
        "\n",
        "### 一、网络结构与节点个数\n",
        "设定“输入层→隐藏层1→隐藏层2→输出层”的3层计算结构（层次关系3次），具体节点数：  \n",
        "- 输入层：1个节点（输入特征$x$）  \n",
        "- 隐藏层1：2个节点（$a_{1,1}, a_{1,2}$）  \n",
        "- 隐藏层2：2个节点（$a_{2,1}, a_{2,2}$）  \n",
        "- 输出层：1个节点（预测值$\\hat{y}$）  \n",
        "\n",
        "**总节点数**：1（输入）+2（隐藏1）+2（隐藏2）+1（输出）=6个节点。  \n",
        "**网络层**：通常输入层不算作“网络层”，因此网络为3层（隐藏层1、隐藏层2、输出层）。\n",
        "\n",
        "\n",
        "### 二、初始参数与输入\n",
        "为简化计算，设定初始参数（权重$W$、偏置$b$）和输入/标签：  \n",
        "- 输入$x=0.5$，真实标签$y=0.8$（二分类场景，标签在[0,1]）  \n",
        "- 激活函数：所有层均用sigmoid（$\\sigma(t)=\\frac{1}{1+e^{-t}}$，导数$\\sigma'(t)=\\sigma(t)(1-\\sigma(t))$）  \n",
        "- 损失函数：均方误差$L=\\frac{1}{2}(\\hat{y}-y)^2$  \n",
        "- 学习率$\\eta=0.1$（控制参数更新幅度）  \n",
        "\n",
        "#### 初始参数（随机小值）：\n",
        "- 隐藏层1：$W_1=\\begin{bmatrix}0.1\\\\0.2\\end{bmatrix}$（2×1矩阵，每行对应1个节点的权重），$b_1=\\begin{bmatrix}0.3\\\\0.4\\end{bmatrix}$（2×1向量）  \n",
        "- 隐藏层2：$W_2=\\begin{bmatrix}0.5&0.6\\\\0.7&0.8\\end{bmatrix}$（2×2矩阵，行对应隐藏层2节点，列对应隐藏层1节点），$b_2=\\begin{bmatrix}0.9\\\\1.0\\end{bmatrix}$（2×1向量）  \n",
        "- 输出层：$W_3=\\begin{bmatrix}1.1&1.2\\end{bmatrix}$（1×2矩阵），$b_3=1.3$（1×1标量）  \n",
        "\n",
        "\n",
        "### 三、第一次前向传播（迭代前的预测）\n",
        "从输入到输出逐层计算，得到中间结果和损失：  \n",
        "\n",
        "1. **隐藏层1计算**：  \n",
        "   预激活值$z_1 = W_1 \\cdot x + b_1$（向量运算）：  \n",
        "   $z_{1,1} = 0.1 \\times 0.5 + 0.3 = 0.35$，$z_{1,2} = 0.2 \\times 0.5 + 0.4 = 0.5$  \n",
        "   激活值$a_1 = \\sigma(z_1)$：  \n",
        "   $a_{1,1} = \\sigma(0.35) \\approx 0.5866$，$a_{1,2} = \\sigma(0.5) \\approx 0.6225$  \n",
        "\n",
        "\n",
        "2. **隐藏层2计算**：  \n",
        "   预激活值$z_2 = W_2 \\cdot a_1 + b_2$（矩阵×向量+向量）：  \n",
        "   $z_{2,1} = 0.5 \\times 0.5866 + 0.6 \\times 0.6225 + 0.9 \\approx 0.2933 + 0.3735 + 0.9 = 1.5668$  \n",
        "   $z_{2,2} = 0.7 \\times 0.5866 + 0.8 \\times 0.6225 + 1.0 \\approx 0.4106 + 0.4980 + 1.0 = 1.9086$  \n",
        "   激活值$a_2 = \\sigma(z_2)$：  \n",
        "   $a_{2,1} = \\sigma(1.5668) \\approx 0.8273$，$a_{2,2} = \\sigma(1.9086) \\approx 0.8709$  \n",
        "\n",
        "\n",
        "3. **输出层计算**：  \n",
        "   预激活值$z_3 = W_3 \\cdot a_2 + b_3$（矩阵×向量+标量）：  \n",
        "   $z_3 = 1.1 \\times 0.8273 + 1.2 \\times 0.8709 + 1.3 \\approx 0.9100 + 1.0451 + 1.3 = 3.2551$  \n",
        "   预测值$\\hat{y} = \\sigma(z_3) \\approx \\sigma(3.2551) \\approx 0.9629$  \n",
        "\n",
        "\n",
        "4. **损失计算**：  \n",
        "   $L = \\frac{1}{2} \\times (0.9629 - 0.8)^2 \\approx \\frac{1}{2} \\times 0.0265 = 0.0133$  \n",
        "\n",
        "\n",
        "### 四、反向传播（计算梯度）\n",
        "从损失反向求各参数的梯度，用于更新参数：  \n",
        "\n",
        "#### 1. 输出层参数梯度（$W_3, b_3$）\n",
        "- 输出层误差项$\\delta_3 = \\frac{\\partial L}{\\partial z_3} = (\\hat{y} - y) \\cdot \\hat{y} \\cdot (1 - \\hat{y})$  \n",
        "  $\\delta_3 = (0.9629 - 0.8) \\times 0.9629 \\times (1 - 0.9629) \\approx 0.1629 \\times 0.9629 \\times 0.0371 \\approx 0.0058$  \n",
        "\n",
        "- $W_3$的梯度：$\\frac{\\partial L}{\\partial W_3} = \\delta_3 \\cdot a_2^T$（$a_2$转置为行向量）  \n",
        "  $\\frac{\\partial L}{\\partial W_3} = 0.0058 \\times \\begin{bmatrix}0.8273&0.8709\\end{bmatrix} \\approx \\begin{bmatrix}0.0048&0.0051\\end{bmatrix}$  \n",
        "\n",
        "- $b_3$的梯度：$\\frac{\\partial L}{\\partial b_3} = \\delta_3 \\approx 0.0058$  \n",
        "\n",
        "\n",
        "#### 2. 隐藏层2参数梯度（$W_2, b_2$）\n",
        "- 隐藏层2误差项$\\delta_2 = (W_3^T \\cdot \\delta_3) \\odot (a_2 \\cdot (1 - a_2))$（$\\odot$为点乘）  \n",
        "  $W_3^T \\cdot \\delta_3 = \\begin{bmatrix}1.1\\\\1.2\\end{bmatrix} \\times 0.0058 \\approx \\begin{bmatrix}0.0064\\\\0.0070\\end{bmatrix}$  \n",
        "  $a_2 \\cdot (1 - a_2) = \\begin{bmatrix}0.8273 \\times 0.1727\\\\0.8709 \\times 0.1291\\end{bmatrix} \\approx \\begin{bmatrix}0.1429\\\\0.1124\\end{bmatrix}$  \n",
        "  $\\delta_2 = \\begin{bmatrix}0.0064 \\times 0.1429\\\\0.0070 \\times 0.1124\\end{bmatrix} \\approx \\begin{bmatrix}0.00092\\\\0.00079\\end{bmatrix}$  \n",
        "\n",
        "- $W_2$的梯度：$\\frac{\\partial L}{\\partial W_2} = \\delta_2 \\cdot a_1^T$（$\\delta_2$为列向量，$a_1^T$为行向量）  \n",
        "  $\\frac{\\partial L}{\\partial W_2} = \\begin{bmatrix}0.00092\\\\0.00079\\end{bmatrix} \\times \\begin{bmatrix}0.5866&0.6225\\end{bmatrix} \\approx \\begin{bmatrix}0.00054&0.00057\\\\0.00046&0.00049\\end{bmatrix}$  \n",
        "\n",
        "- $b_2$的梯度：$\\frac{\\partial L}{\\partial b_2} = \\delta_2 \\approx \\begin{bmatrix}0.00092\\\\0.00079\\end{bmatrix}$  \n",
        "\n",
        "\n",
        "#### 3. 隐藏层1参数梯度（$W_1, b_1$）\n",
        "- 隐藏层1误差项$\\delta_1 = (W_2^T \\cdot \\delta_2) \\odot (a_1 \\cdot (1 - a_1))$  \n",
        "  $W_2^T \\cdot \\delta_2 = \\begin{bmatrix}0.5&0.7\\\\0.6&0.8\\end{bmatrix} \\times \\begin{bmatrix}0.00092\\\\0.00079\\end{bmatrix} = \\begin{bmatrix}0.5 \\times 0.00092 + 0.7 \\times 0.00079\\\\0.6 \\times 0.00092 + 0.8 \\times 0.00079\\end{bmatrix} \\approx \\begin{bmatrix}0.00101\\\\0.00118\\end{bmatrix}$  \n",
        "  $a_1 \\cdot (1 - a_1) = \\begin{bmatrix}0.5866 \\times 0.4134\\\\0.6225 \\times 0.3775\\end{bmatrix} \\approx \\begin{bmatrix}0.2425\\\\0.2350\\end{bmatrix}$  \n",
        "  $\\delta_1 = \\begin{bmatrix}0.00101 \\times 0.2425\\\\0.00118 \\times 0.2350\\end{bmatrix} \\approx \\begin{bmatrix}0.00025\\\\0.00028\\end{bmatrix}$  \n",
        "\n",
        "- $W_1$的梯度：$\\frac{\\partial L}{\\partial W_1} = \\delta_1 \\cdot x^T$（$x$为标量，转置后仍为标量）  \n",
        "  $\\frac{\\partial L}{\\partial W_1} = \\begin{bmatrix}0.00025\\\\0.00028\\end{bmatrix} \\times 0.5 \\approx \\begin{bmatrix}0.000125\\\\0.000140\\end{bmatrix}$  \n",
        "\n",
        "- $b_1$的梯度：$\\frac{\\partial L}{\\partial b_1} = \\delta_1 \\approx \\begin{bmatrix}0.00025\\\\0.00028\\end{bmatrix}$  \n",
        "\n",
        "\n",
        "### 五、参数更新（迭代一次）\n",
        "用梯度下降更新所有参数（新参数=旧参数-学习率×梯度）：  \n",
        "\n",
        "- $W_3^{\\text{新}} = W_3 - \\eta \\cdot \\frac{\\partial L}{\\partial W_3} \\approx \\begin{bmatrix}1.1 - 0.1 \\times 0.0048&1.2 - 0.1 \\times 0.0051\\end{bmatrix} \\approx \\begin{bmatrix}1.0995&1.1995\\end{bmatrix}$  \n",
        "- $b_3^{\\text{新}} = 1.3 - 0.1 \\times 0.0058 \\approx 1.2994$  \n",
        "- $W_2^{\\text{新}} \\approx \\begin{bmatrix}0.5 - 0.1 \\times 0.00054&0.6 - 0.1 \\times 0.00057\\\\0.7 - 0.1 \\times 0.00046&0.8 - 0.1 \\times 0.00049\\end{bmatrix} \\approx \\begin{bmatrix}0.4999&0.5999\\\\0.6999&0.7999\\end{bmatrix}$  \n",
        "- $b_2^{\\text{新}} \\approx \\begin{bmatrix}0.9 - 0.1 \\times 0.00092\\\\1.0 - 0.1 \\times 0.00079\\end{bmatrix} \\approx \\begin{bmatrix}0.8999\\\\0.9999\\end{bmatrix}$  \n",
        "- $W_1^{\\text{新}} \\approx \\begin{bmatrix}0.1 - 0.1 \\times 0.000125\\\\0.2 - 0.1 \\times 0.000140\\end{bmatrix} \\approx \\begin{bmatrix}0.09999\\\\0.19999\\end{bmatrix}$  \n",
        "- $b_1^{\\text{新}} \\approx \\begin{bmatrix}0.3 - 0.1 \\times 0.00025\\\\0.4 - 0.1 \\times 0.00028\\end{bmatrix} \\approx \\begin{bmatrix}0.299975\\\\0.399972\\end{bmatrix}$  \n",
        "\n",
        "\n",
        "### 六、迭代一次后的函数关系变化\n",
        "神经网络的本质是“输入$x$到输出$\\hat{y}$的非线性映射函数”，迭代一次的核心是**通过调整参数优化这个映射，使$\\hat{y}$更接近真实标签$y$**。  \n",
        "\n",
        "- **迭代前的函数关系**：$\\hat{y} = \\sigma\\left(W_3 \\cdot \\sigma\\left(W_2 \\cdot \\sigma\\left(W_1 \\cdot x + b_1\\right) + b_2\\right) + b_3\\right)$，代入初始参数后，对于$x=0.5$，输出$\\hat{y}=0.9629$（偏离$y=0.8$）。  \n",
        "- **迭代后的函数关系**：参数微调后，映射函数变为$\\hat{y} = \\sigma\\left(W_3^{\\text{新}} \\cdot \\sigma\\left(W_2^{\\text{新}} \\cdot \\sigma\\left(W_1^{\\text{新}} \\cdot x + b_1^{\\text{新}}\\right) + b_2^{\\text{新}}\\right) + b_3^{\\text{新}}\\right)$。此时对$x=0.5$重新计算，$\\hat{y}$会略减小（更接近0.8），损失$L$也会下降（从约0.013261降至约0.013252）。  \n",
        "\n",
        "\n",
        "### 总结\n",
        "- **节点个数**：输入层1、隐藏层1（2）、隐藏层2（2）、输出层1，共6个节点，3层网络。  \n",
        "- **迭代一次的函数关系**：通过反向传播计算梯度并更新参数，使输入到输出的非线性映射更接近“输入$x$→真实标签$y$”的潜在关系，本质是用梯度下降最小化损失函数。"
      ],
      "metadata": {
        "id": "WDQzFP0LCNQ4"
      }
    }
  ]
}