{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "e9c8fba9-395c-46f1-8cd1-bf8de0d5d577",
   "metadata": {},
   "outputs": [],
   "source": [
    "import math\n",
    "import random\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "954d1184-c8c4-4ae1-be7b-09a7f3de4ea6",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Value:\n",
    "    def __init__(self, data, _children=(), _op=''):\n",
    "        self.data = data\n",
    "        self.grad = 0.0 # 梯度（向量）\n",
    "        self._backward = lambda: None # 反向传播函数，来执行梯度的自动传递（默认为空）\n",
    "        self._prev = set(_children) # 计算节点\n",
    "        self._op = _op # 计算符号\n",
    "\n",
    "    def __repr__(self):\n",
    "        return f\"Value(data={self.data}, grad={self.grad})\"\n",
    "\n",
    "    def __add__(self, other): # self 是加法的左操作数，other 是加法的右操作数\n",
    "        if isinstance(other, Value):\n",
    "            other = other # 如果 other 已经是 Value 类型，则保持原样\n",
    "        else:\n",
    "            other = Value(other) # 如果 other 不是 Value 类型，则将其包装成 Value 对象\n",
    "        \n",
    "        out = Value(self.data + other.data, (self, other), '+') # 创建 out 节点时建立的计算图（将 self 和 other 设为子节点）\n",
    "\n",
    "        def _backward():\n",
    "            self.grad += 1.0 * out.grad\n",
    "            other.grad += 1.0 * out.grad\n",
    "    \n",
    "        out._backward = _backward\n",
    "\n",
    "        return out # out 是加法的结果值\n",
    "\n",
    "    def __mul__(self, other):\n",
    "        if isinstance(other, Value):\n",
    "            other = other # 如果 other 已经是 Value 类型，则保持原样\n",
    "        else:\n",
    "            other = Value(other) # 如果 other 不是 Value 类型，则将其包装成 Value 对象\n",
    "\n",
    "        out = Value(self.data * other.data, (self, other), '*')\n",
    "\n",
    "        def _backward():\n",
    "            self.grad += other.data * out.grad\n",
    "            other.grad += self.data * out.grad\n",
    "\n",
    "        out._backward = _backward\n",
    "\n",
    "        return out\n",
    "\n",
    "    def __pow__(self, other):\n",
    "        assert isinstance(other, (int, float)), \"only supporting int/float powers for now\"\n",
    "        out = Value(self.data**other, (self,), f'**{other}')\n",
    "\n",
    "        def _backward():\n",
    "            self.grad += (other * self.data**(other-1)) * out.grad\n",
    "        out._backward = _backward\n",
    "\n",
    "        return out\n",
    "        \n",
    "    def __neg__(self): # -self\n",
    "        return self * -1\n",
    "\n",
    "    def __radd__(self, other): # other + self\n",
    "        return self + other\n",
    "\n",
    "    def __sub__(self, other): # self - other\n",
    "        return self + (-other)\n",
    "\n",
    "    def __rsub__(self, other): # other - self\n",
    "        return other + (-self)\n",
    "\n",
    "    def __rmul__(self, other): # other * self\n",
    "        return self * other\n",
    "\n",
    "    def __truediv__(self, other): # self / other\n",
    "        return self * other**-1\n",
    "\n",
    "    def __rtruediv__(self, other): # other / self\n",
    "        return other * self**-1\n",
    "\n",
    "    def tanh(self):\n",
    "        x = self.data\n",
    "        t = (math.exp(2*x) - 1) / (math.exp(2*x) + 1)\n",
    "        out = Value(t, (self, ), 'tanh')\n",
    "\n",
    "        def _backward():\n",
    "            self.grad += (1 - t**2) * out.grad\n",
    "    \n",
    "        out._backward = _backward\n",
    "\n",
    "        return out\n",
    "\n",
    "    def backward(self):\n",
    "        topo = [] # 存储拓扑排序结果\n",
    "        visited = set() # 记录已访问的节点\n",
    "\n",
    "        def build_topo(v):\n",
    "            if v not in visited: # 如果当前节点 v 没有被访问过，继续处理\n",
    "                visited.add(v) # 将 v 加入 visited 集合\n",
    "                for child in v._prev: # 对 v 的所有前驱节点(v._prev)递归调用 build_topo\n",
    "                    build_topo(child)\n",
    "                topo.append(v) # 当所有前驱节点都处理完后，将 v 加入 topo 列表\n",
    "\n",
    "        build_topo(self)\n",
    "\n",
    "        self.grad = 1.0\n",
    "        for node in reversed(topo):\n",
    "            node._backward()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "21b79d07-3b88-4620-9c3f-d45be16fcdba",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Neuron:\n",
    "    def __init__(self, nin): # nin 是输入数量（输入特征维度）\n",
    "        print(f\"创建 Neuron: {nin} 个输入\")\n",
    "        # 权重列表，包含 nin 个 Value 对象，每个权重初始化为 -1 到 1 之间的随机数\n",
    "        self.w = [Value(random.uniform(-1, 1)) for _ in range(nin)] \n",
    "        # 偏置项，也是一个 Value 对象，初始化为 -1 到 1 之间的随机数\n",
    "        self.b = Value(random.uniform(-1, 1)) \n",
    "\n",
    "    def __call__(self, x): # 前向传播\n",
    "        # print(f\"输入 x: {x}\")\n",
    "        # print(f\"权重 w: {[f'{w.data:.4f}' for w in self.w]}\")\n",
    "        # print(f\"偏置 b: {self.b.data:.4f}\")\n",
    "        \n",
    "        weighted_sum = 0\n",
    "        \n",
    "        for i, (wi, xi) in enumerate(zip(self.w, x)):\n",
    "            product = wi * xi\n",
    "            weighted_sum = weighted_sum + product if i > 0 else product\n",
    "            # print(f\"w{i}x{i}: {wi.data:.4f} × {xi} = {product.data:.4f}\")\n",
    "        \n",
    "        act = weighted_sum + self.b\n",
    "        # print(f\"加权和 + 偏置: {weighted_sum.data:.4f} + {self.b.data:.4f} = {act.data:.4f}\")\n",
    "        \n",
    "        out = act.tanh() # 通过激活函数\n",
    "        print(f\"-----------------------------------> 激活（神经元输出）: tanh({act.data:.4f}) = {out.data:.4f}\")\n",
    "        \n",
    "        return out # tanh(b + w₁x₁ + w₂x₂ + ... + wₙxₙ)\n",
    "\n",
    "    def parameters(self):\n",
    "        return self.w + [self.b]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "3eebfbcc-a7d5-48a1-a03c-e4db3abca2cd",
   "metadata": {},
   "outputs": [],
   "source": [
    "class Layer:\n",
    "    def __init__(self, nin, nout):\n",
    "        print(f\"创建 Layer: {nin} 个输入维度，{nout} 个输出维度\")\n",
    "        # 包含 nout 个神经元的列表，每个神经元都有 nin 个输入\n",
    "        self.neurons = [Neuron(nin) for _ in range(nout)] \n",
    "        print(f\"创建了 {len(self.neurons)} 个神经元\")\n",
    "\n",
    "    def __call__(self, x):\n",
    "        outs = [n(x) for n in self.neurons]\n",
    "        print(f\"-----------------------------------> Layer 输出: {[out.data for out in outs]}\")\n",
    "        return outs\n",
    "\n",
    "    def parameters(self):\n",
    "        return [p for n in self.neurons for p in n.parameters()] # 收集每一个神经元的参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "7f8bb323-e5a5-4073-933e-080d6d7ae6b7",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MLP:\n",
    "    def __init__(self, nin, nouts): # nouts 是列表，表示各层的神经元数量\n",
    "        print(f\"创建 MLP：输入维度: {nin}，隐藏层结构：{nouts}\")\n",
    "        \n",
    "        sz = [nin] + nouts # 构建完整的网络尺寸列表\n",
    "        self.layers = [Layer(sz[i], sz[i + 1]) for i in range(len(nouts))] # 创建层列表，每层连接相邻的尺寸\n",
    "\n",
    "        print(f\"共创建了 {len(self.layers)} 层\")\n",
    "    \n",
    "    def __call__(self, x):\n",
    "        for layer in self.layers:\n",
    "            x = layer(x) # 依次通过每一层，前一层的输出作为后一层的输入\n",
    "        print(f\"-----------------------------------> MPL 输出: {[val.data for val in x]}\")\n",
    "        return x\n",
    "\n",
    "    def parameters(self):\n",
    "        return [p for layer in self.layers for p in layer.parameters()] # 收集每一层的参数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "2f3c20db-3871-4b31-944c-270bad5149a5",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "创建 MLP：输入维度: 2，隐藏层结构：[3, 2, 1]\n",
      "创建 Layer: 2 个输入维度，3 个输出维度\n",
      "创建 Neuron: 2 个输入\n",
      "创建 Neuron: 2 个输入\n",
      "创建 Neuron: 2 个输入\n",
      "创建了 3 个神经元\n",
      "创建 Layer: 3 个输入维度，2 个输出维度\n",
      "创建 Neuron: 3 个输入\n",
      "创建 Neuron: 3 个输入\n",
      "创建了 2 个神经元\n",
      "创建 Layer: 2 个输入维度，1 个输出维度\n",
      "创建 Neuron: 2 个输入\n",
      "创建了 1 个神经元\n",
      "共创建了 3 层\n"
     ]
    }
   ],
   "source": [
     "# Build the MLP: 2 inputs → layer of 3 neurons → layer of 2 neurons → 1 output\n",
     "n = MLP(2, [3, 2, 1]) "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0d76f45a-4182-4990-b5bc-ed5e84a44f58",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Raw inputs: two training examples, each with 2 features\n",
     "xs = [\n",
     "    [2.0, 3.0],\n",
     "    [3.0, -1.0],\n",
     "]\n",
     "# Ground-truth targets, one per example\n",
     "ys = [1.0, -1.0]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "6873b90b-9f00-4bb3-b10c-1e18f4fe26c1",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-----------------------------------> 激活（神经元输出）: tanh(-1.6754) = -0.9323\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.7157) = 0.6142\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.7235) = 0.6191\n",
      "-----------------------------------> Layer 输出: [-0.9322661909131034, 0.6142374750595943, 0.6190630422248108]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-0.0046) = -0.0046\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-1.0897) = -0.7968\n",
      "-----------------------------------> Layer 输出: [-0.004632092080172008, -0.7967650528638669]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.2756) = 0.2689\n",
      "-----------------------------------> Layer 输出: [0.2688550595643902]\n",
      "-----------------------------------> MPL 输出: [0.2688550595643902]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-3.2565) = -0.9970\n",
      "-----------------------------------> 激活（神经元输出）: tanh(3.3962) = 0.9978\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-2.5157) = -0.9870\n",
      "-----------------------------------> Layer 输出: [-0.9970364571331297, 0.9977580414236081, -0.9870254870020759]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.2789) = 0.2718\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.1272) = 0.1265\n",
      "-----------------------------------> Layer 输出: [0.2718438432470808, 0.12651061418663856]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-0.0747) = -0.0745\n",
      "-----------------------------------> Layer 输出: [-0.07452920040434831]\n",
      "-----------------------------------> MPL 输出: [-0.07452920040434831]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[Value(data=0.2688550595643902, grad=0.0),\n",
       " Value(data=-0.07452920040434831, grad=0.0)]"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Run the MLP forward on each of the two input examples\n",
     "ypred = [n(x)[0] for x in xs]\n",
     "# two input examples yield two predictions (the final layer has one neuron, hence [0])\n",
     "ypred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "a807d83f-1338-4b31-9ac9-f8d04fe64884",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[Value(data=0.5345729239245914, grad=0.0),\n",
       " Value(data=0.856496200904215, grad=0.0)]"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "\"\"\"\n",
     "For the first input [2.0, 3.0] the desired output is [1.0], but the actual output is 0.2688550595643902.\n",
     "For the second input [3.0, -1.0] the desired output is [-1.0], but the actual output is -0.07452920040434831.\n",
     "\n",
     "So how do we optimize the network — i.e. its weights and biases — to better predict the targets?\n",
     "Deep learning's core approach is to compute a single number that summarizes network performance,\n",
     "called the loss. The current loss is poor because the targets (desired outputs) differ a lot from\n",
     "the actual outputs, so the goal is to minimize this loss.\n",
     "\"\"\"\n",
     "\n",
     "# per-example squared error — the building block of the squared-error loss to be minimized\n",
     "[(yout - ygt)**2 for ygt, yout in zip(ys, ypred)]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d008ce2a-47e3-48de-91b8-9178d282d953",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Value(data=1.3910691248288063, grad=0.0)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Sum the per-example squared errors into a single scalar loss\n",
     "loss = sum((yout - ygt)**2 for ygt, yout in zip(ys, ypred))\n",
     "loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "6855af80-cdc2-439f-b7b0-4faba1023947",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "[Value(data=-0.8405336648134414, grad=0.0),\n",
       " Value(data=0.18513793226994069, grad=0.0),\n",
       " Value(data=-0.5497781343455062, grad=0.0),\n",
       " Value(data=0.8450031022289088, grad=0.0),\n",
       " Value(data=-0.4588785982525607, grad=0.0),\n",
       " Value(data=0.4023275966572095, grad=0.0),\n",
       " Value(data=-0.8933078482454186, grad=0.0),\n",
       " Value(data=0.5864700592520342, grad=0.0),\n",
       " Value(data=0.7506900165500772, grad=0.0),\n",
       " Value(data=0.25708330962137005, grad=0.0),\n",
       " Value(data=0.5788359128726517, grad=0.0),\n",
       " Value(data=-0.048653435817504365, grad=0.0),\n",
       " Value(data=-0.09038521300653923, grad=0.0),\n",
       " Value(data=-0.5105044988055443, grad=0.0),\n",
       " Value(data=-0.5453712035298484, grad=0.0),\n",
       " Value(data=-0.8673107751041462, grad=0.0),\n",
       " Value(data=-0.6937088311726269, grad=0.0),\n",
       " Value(data=0.5677600179900635, grad=0.0),\n",
       " Value(data=-0.549423015842361, grad=0.0),\n",
       " Value(data=-0.1595018776271473, grad=0.0)]"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "n.parameters() # inspect all trainable parameters (weights and biases) of the MLP"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "561749e6-2dc8-4101-875c-187f1057072c",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.0\n",
      "-0.8405336648134414\n",
      "0.0\n",
      "0.18513793226994069\n",
      "0.0\n",
      "0.8450031022289088\n",
      "0.0\n",
      "-0.4588785982525607\n"
     ]
    }
   ],
   "source": [
     "print(n.layers[0].neurons[0].w[0].grad) # this weight's gradient w.r.t. the loss (still 0.0: backward() has not run yet)\n",
     "print(n.layers[0].neurons[0].w[0].data) # this weight's current value\n",
     "print(n.layers[0].neurons[0].w[1].grad) # gradient of the same neuron's second weight\n",
     "print(n.layers[0].neurons[0].w[1].data) # value of the same neuron's second weight\n",
     "print(n.layers[0].neurons[1].w[0].grad) # gradient of the next neuron's first weight\n",
     "print(n.layers[0].neurons[1].w[0].data) # value of the next neuron's first weight\n",
     "print(n.layers[0].neurons[1].w[1].grad) # gradient of the next neuron's second weight\n",
     "print(n.layers[0].neurons[1].w[1].data) # value of the next neuron's second weight"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "id": "1f186c78-01a9-443a-99c1-80abb8e7eca6",
   "metadata": {},
   "outputs": [],
   "source": [
     "loss.backward() # backpropagate from the loss: fills in .grad for every Value in its graph"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "id": "a5dc0f46-9c18-4357-a660-4451bccd2b69",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-0.07476298870152231\n",
      "-0.8405336648134414\n",
      "-0.1367795600840703\n",
      "0.18513793226994069\n",
      "-0.7252813348969714\n",
      "0.8450031022289088\n",
      "-1.1150911992694705\n",
      "-0.4588785982525607\n"
     ]
    }
   ],
   "source": [
     "print(n.layers[0].neurons[0].w[0].grad) # this weight's gradient w.r.t. the loss (nonzero now that backward() has run)\n",
     "print(n.layers[0].neurons[0].w[0].data) # this weight's value (unchanged: backward() only computes gradients)\n",
     "print(n.layers[0].neurons[0].w[1].grad) # gradient of the same neuron's second weight\n",
     "print(n.layers[0].neurons[0].w[1].data) # value of the same neuron's second weight\n",
     "print(n.layers[0].neurons[1].w[0].grad) # gradient of the next neuron's first weight\n",
     "print(n.layers[0].neurons[1].w[0].data) # value of the next neuron's first weight\n",
     "print(n.layers[0].neurons[1].w[1].grad) # gradient of the next neuron's second weight\n",
     "print(n.layers[0].neurons[1].w[1].data) # value of the next neuron's second weight"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "a94bd025-2a60-4b14-9998-a37fe914c254",
   "metadata": {},
   "source": [
    "进行一次反向传播后，梯度发生了变化，也就是 .grad 的值从 0 变成了非 0 值，但是 .data （前向传播的结果）是没有变化的，这是因为反向传播只计算梯度，接下来就要进行一次参数更新。\n",
    "\n",
    "梯度下降的目标是最小化损失函数。梯度的方向总是指向函数值增长最快的方向。\n",
    "\n",
    "参数更新公式：p = p - learning_rate * p.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "id": "ed804bf0-06c7-45ab-b58e-69e74f7dfa4a",
   "metadata": {},
   "outputs": [],
   "source": [
     "# One gradient-descent step: move each parameter against its gradient (learning rate 0.01)\n",
     "for p in n.parameters():\n",
     "    p.data -= 0.01 * p.grad"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "id": "ad44f35d-4aef-43c5-b927-be2b6b20c127",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-0.07476298870152231\n",
      "-0.8397860349264261\n",
      "-0.1367795600840703\n",
      "0.1865057278707814\n",
      "-0.7252813348969714\n",
      "0.8522559155778786\n",
      "-1.1150911992694705\n",
      "-0.44772768625986603\n"
     ]
    }
   ],
   "source": [
     "print(n.layers[0].neurons[0].w[0].grad) # gradient (unchanged by the parameter update)\n",
     "print(n.layers[0].neurons[0].w[0].data) # this weight's value after the update (moved against its gradient)\n",
     "print(n.layers[0].neurons[0].w[1].grad) # gradient of the same neuron's second weight\n",
     "print(n.layers[0].neurons[0].w[1].data) # updated value of the same neuron's second weight\n",
     "print(n.layers[0].neurons[1].w[0].grad) # gradient of the next neuron's first weight\n",
     "print(n.layers[0].neurons[1].w[0].data) # updated value of the next neuron's first weight\n",
     "print(n.layers[0].neurons[1].w[1].grad) # gradient of the next neuron's second weight\n",
     "print(n.layers[0].neurons[1].w[1].data) # updated value of the next neuron's second weight"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 32,
   "id": "07485fec-5611-4f4c-9767-bb87b6c1ee19",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "-----------------------------------> 激活（神经元输出）: tanh(-1.6694) = -0.9315\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.7673) = 0.6454\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.7398) = 0.6290\n",
      "-----------------------------------> Layer 输出: [-0.9314771853818304, 0.6453610666999839, 0.6290184074872949]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.0147) = 0.0147\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-1.1035) = -0.8018\n",
      "-----------------------------------> Layer 输出: [0.0146536330283334, -0.8017537526815214]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.2949) = 0.2867\n",
      "-----------------------------------> Layer 输出: [0.2866733122236254]\n",
      "-----------------------------------> MPL 输出: [0.2866733122236254]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-3.2552) = -0.9970\n",
      "-----------------------------------> 激活（神经元输出）: tanh(3.4105) = 0.9978\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-2.5131) = -0.9870\n",
      "-----------------------------------> Layer 输出: [-0.9970289236225941, 0.9978210042695504, -0.9869586693322515]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.2554) = 0.2500\n",
      "-----------------------------------> 激活（神经元输出）: tanh(0.1613) = 0.1599\n",
      "-----------------------------------> Layer 输出: [0.2499896946702094, 0.15989963140614297]\n",
      "-----------------------------------> 激活（神经元输出）: tanh(-0.1136) = -0.1131\n",
      "-----------------------------------> Layer 输出: [-0.11314169984104651]\n",
      "-----------------------------------> MPL 输出: [-0.11314169984104651]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "[Value(data=0.2866733122236254, grad=0.0),\n",
       " Value(data=-0.11314169984104651, grad=0.0)]"
      ]
     },
     "execution_count": 32,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Forward pass again, now with the updated parameters\n",
     "n_ypred = [n(x)[0] for x in xs]\n",
     "n_ypred"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "id": "b51fb4e9-b4f1-4317-b117-079fddeab602",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "Value(data=1.2953526080548419, grad=0.0)"
      ]
     },
     "execution_count": 38,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Recompute the loss with the updated parameters (expected to be lower than before the update)\n",
     "n_loss = sum((yout - ygt)**2 for ygt, yout in zip(ys, n_ypred))\n",
     "n_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "99de85cf-ed4a-4641-9f13-a4dbd3b7a326",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
