{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "1daf3227-c139-4fc2-a30b-3149c84db4c6",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|                                                                             | 3/50000 [00:09<43:14:53,  3.11s/it]D:\\Software\\Python\\Lib\\site-packages\\numpy\\core\\_methods.py:176: RuntimeWarning: overflow encountered in multiply\n",
      "  x = um.multiply(x, x, out=x)\n",
      "  0%|                                                                             | 4/50000 [00:12<43:27:46,  3.13s/it]D:\\Software\\Python\\Lib\\site-packages\\numpy\\core\\_methods.py:187: RuntimeWarning: overflow encountered in reduce\n",
      "  ret = umr_sum(x, axis, dtype, out, keepdims=keepdims, where=where)\n",
      "C:\\Users\\96237\\AppData\\Local\\Temp\\ipykernel_22696\\4103474555.py:202: RuntimeWarning: overflow encountered in multiply\n",
      "  dx_hat = d_L_d_out * self.gamma\n",
      "D:\\Software\\Python\\Lib\\site-packages\\numpy\\core\\fromnumeric.py:88: RuntimeWarning: invalid value encountered in reduce\n",
      "  return ufunc.reduce(obj, axis, dtype, out, **passkwargs)\n",
      "C:\\Users\\96237\\AppData\\Local\\Temp\\ipykernel_22696\\4103474555.py:205: RuntimeWarning: invalid value encountered in multiply\n",
      "  m * dx_hat - np.sum(dx_hat, axis=0) - self.x_hat * np.sum(dx_hat * self.x_hat, axis=0))\n",
      "  0%|                                                                            | 73/50000 [04:11<47:43:14,  3.44s/it]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 354\u001b[0m\n\u001b[0;32m    352\u001b[0m model \u001b[38;5;241m=\u001b[39m create_vgg(x_train\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m1\u001b[39m:])\n\u001b[0;32m    353\u001b[0m optimizer \u001b[38;5;241m=\u001b[39m Optimizer(method\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124madam\u001b[39m\u001b[38;5;124m'\u001b[39m, lr\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.001\u001b[39m)\n\u001b[1;32m--> 354\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreg_lambda\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.001\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m    356\u001b[0m \u001b[38;5;66;03m# 评估模型\u001b[39;00m\n\u001b[0;32m    357\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mevaluate\u001b[39m(model, X_test, y_test):\n",
      "Cell \u001b[1;32mIn[1], line 300\u001b[0m, in \u001b[0;36mtrain\u001b[1;34m(model, X_train, y_train, epochs, optimizer, reg_lambda)\u001b[0m\n\u001b[0;32m    297\u001b[0m     grad \u001b[38;5;241m=\u001b[39m output\n\u001b[0;32m    298\u001b[0m     grad[np\u001b[38;5;241m.\u001b[39margmax(y)] \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n\u001b[1;32m--> 300\u001b[0m     \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mgrad\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlr\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreg_lambda\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    302\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;250m \u001b[39m\u001b[38;5;241m+\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, Loss: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mloss\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, Accuracy: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_correct\u001b[38;5;250m \u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;28mlen\u001b[39m(X_train)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n",
      "Cell \u001b[1;32mIn[1], line 274\u001b[0m, in \u001b[0;36mCNN.backward\u001b[1;34m(self, d_L_d_out, learn_rate, reg_lambda)\u001b[0m\n\u001b[0;32m    272\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mbackward\u001b[39m(\u001b[38;5;28mself\u001b[39m, d_L_d_out, learn_rate, reg_lambda):\n\u001b[0;32m    273\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m layer \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mreversed\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlayers):\n\u001b[1;32m--> 274\u001b[0m         d_L_d_out \u001b[38;5;241m=\u001b[39m \u001b[43mlayer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43md_L_d_out\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlearn_rate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreg_lambda\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    275\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m d_L_d_out\n",
      "Cell \u001b[1;32mIn[1], line 77\u001b[0m, in \u001b[0;36mConvLayer.backward\u001b[1;34m(self, d_L_d_out, learn_rate, reg_lambda)\u001b[0m\n\u001b[0;32m     75\u001b[0m             region \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlast_padded_input[i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size, j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size]\n\u001b[0;32m     76\u001b[0m             d_L_d_filters[f] \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m d_L_d_out[i, j, f] \u001b[38;5;241m*\u001b[39m region\n\u001b[1;32m---> 77\u001b[0m             d_L_d_input[i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size, j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size] \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m d_L_d_out[i, j, f] \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilters[f]\n\u001b[0;32m     79\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilters \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m=\u001b[39m learn_rate 
\u001b[38;5;241m*\u001b[39m (d_L_d_filters \u001b[38;5;241m+\u001b[39m reg_lambda \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilters)\n\u001b[0;32m     80\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.datasets import cifar10\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Load CIFAR-10; scale pixels to [0, 1] and one-hot encode the labels.\n",
    "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n",
    "x_train, x_test = x_train.astype('float32') / 255.0, x_test.astype('float32') / 255.0\n",
    "y_train, y_test = to_categorical(y_train, 10), to_categorical(y_test, 10)\n",
    "\n",
    "# Activation functions and their derivatives.\n",
    "def relu(x):\n",
    "    return np.maximum(0, x)\n",
    "\n",
    "def relu_derivative(x):\n",
    "    # Boolean mask; behaves as 0/1 under multiplication.\n",
    "    return x > 0\n",
    "\n",
    "def sigmoid(x):\n",
    "    # Clip the exponent to avoid overflow warnings for large |x|;\n",
    "    # the result is fully saturated beyond +/-500 anyway.\n",
    "    return 1 / (1 + np.exp(-np.clip(x, -500, 500)))\n",
    "\n",
    "def sigmoid_derivative(x):\n",
    "    s = sigmoid(x)  # evaluate once instead of twice\n",
    "    return s * (1 - s)\n",
    "\n",
    "def tanh(x):\n",
    "    return np.tanh(x)\n",
    "\n",
    "def tanh_derivative(x):\n",
    "    return 1 - np.tanh(x) ** 2\n",
    "\n",
    "def softmax(x):\n",
    "    # Shift by the max for numerical stability.\n",
    "    exp_x = np.exp(x - np.max(x))\n",
    "    return exp_x / np.sum(exp_x)\n",
    "\n",
    "def softmax_derivative(x):\n",
    "    # Elementwise (diagonal-of-Jacobian) form s * (1 - s); computed once.\n",
    "    s = softmax(x)\n",
    "    return s * (1 - s)\n",
    "\n",
    "# Convolution layer (stride/padding) whose forward output passes through ReLU.\n",
    "class ConvLayer:\n",
    "    def __init__(self, num_filters, filter_size, input_depth, stride=1, padding=1):\n",
    "        self.num_filters = num_filters\n",
    "        self.filter_size = filter_size\n",
    "        self.input_depth = input_depth\n",
    "        self.stride = stride\n",
    "        self.padding = padding\n",
    "        # He initialization: suited to ReLU layers; the original\n",
    "        # 1/(filter_size^2) scaling destabilizes a VGG-depth stack.\n",
    "        self.filters = np.random.randn(num_filters, filter_size, filter_size, input_depth) * np.sqrt(2.0 / (filter_size * filter_size * input_depth))\n",
    "\n",
    "    def forward(self, input):\n",
    "        # input: (h, w, depth). Returns relu(conv(input)).\n",
    "        self.last_input = input\n",
    "        if self.padding > 0:\n",
    "            input = np.pad(input, [(self.padding, self.padding), (self.padding, self.padding), (0, 0)], mode='constant')\n",
    "        self.last_padded_input = input\n",
    "        h_p, w_p, d_p = input.shape\n",
    "        out_h = (h_p - self.filter_size) // self.stride + 1\n",
    "        out_w = (w_p - self.filter_size) // self.stride + 1\n",
    "        output = np.zeros((out_h, out_w, self.num_filters))\n",
    "        for i in range(out_h):\n",
    "            for j in range(out_w):\n",
    "                region = input[i * self.stride:i * self.stride + self.filter_size, j * self.stride:j * self.stride + self.filter_size]\n",
    "                for f in range(self.num_filters):\n",
    "                    output[i, j, f] = np.sum(region * self.filters[f])\n",
    "        # Cache the pre-activation so backward can apply the ReLU derivative.\n",
    "        self.last_preact = output\n",
    "        return relu(output)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        # Bug fix: forward returns relu(output), so the incoming gradient\n",
    "        # must be gated by the ReLU derivative before convolving backwards.\n",
    "        d_L_d_out = d_L_d_out * relu_derivative(self.last_preact)\n",
    "        d_L_d_filters = np.zeros(self.filters.shape)\n",
    "        d_L_d_input = np.zeros(self.last_padded_input.shape)\n",
    "\n",
    "        for i in range(d_L_d_out.shape[0]):\n",
    "            for j in range(d_L_d_out.shape[1]):\n",
    "                region = self.last_padded_input[i * self.stride:i * self.stride + self.filter_size, j * self.stride:j * self.stride + self.filter_size]\n",
    "                for f in range(self.num_filters):\n",
    "                    d_L_d_filters[f] += d_L_d_out[i, j, f] * region\n",
    "                    d_L_d_input[i * self.stride:i * self.stride + self.filter_size, j * self.stride:j * self.stride + self.filter_size] += d_L_d_out[i, j, f] * self.filters[f]\n",
    "\n",
    "        # SGD step with L2 weight decay.\n",
    "        self.filters -= learn_rate * (d_L_d_filters + reg_lambda * self.filters)\n",
    "        if self.padding > 0:\n",
    "            d_L_d_input = d_L_d_input[self.padding:-self.padding, self.padding:-self.padding, :]\n",
    "        return d_L_d_input\n",
    "\n",
    "# Max-pooling layer (channel-wise, square window).\n",
    "class MaxPoolLayer:\n",
    "    def __init__(self, size, stride):\n",
    "        self.size = size\n",
    "        self.stride = stride\n",
    "\n",
    "    def forward(self, input):\n",
    "        # input: (h, w, num_filters); pools each channel independently.\n",
    "        self.last_input = input\n",
    "        h, w, num_filters = input.shape\n",
    "        out_h = (h - self.size) // self.stride + 1\n",
    "        out_w = (w - self.size) // self.stride + 1\n",
    "        pooled = np.zeros((out_h, out_w, num_filters))\n",
    "        for r in range(out_h):\n",
    "            rs = r * self.stride\n",
    "            for c in range(out_w):\n",
    "                cs = c * self.stride\n",
    "                pooled[r, c] = np.max(input[rs:rs + self.size, cs:cs + self.size], axis=(0, 1))\n",
    "        return pooled\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate=None, reg_lambda=None):\n",
    "        # Route each output gradient back to every position that attained\n",
    "        # the window maximum (ties all receive the gradient, as before).\n",
    "        d_L_d_input = np.zeros(self.last_input.shape)\n",
    "        out_h, out_w, channels = d_L_d_out.shape\n",
    "        for r in range(out_h):\n",
    "            rs = r * self.stride\n",
    "            for c in range(out_w):\n",
    "                cs = c * self.stride\n",
    "                for ch in range(channels):\n",
    "                    region = self.last_input[rs:rs + self.size, cs:cs + self.size, ch]\n",
    "                    window = d_L_d_input[rs:rs + self.size, cs:cs + self.size, ch]\n",
    "                    window[region == np.max(region)] = d_L_d_out[r, c, ch]\n",
    "        return d_L_d_input\n",
    "\n",
    "# Fully connected layer with a selectable activation.\n",
    "class DenseLayer:\n",
    "    def __init__(self, input_len, output_len, activation='relu'):\n",
    "        # Xavier-style scaling keeps pre-activation variance near 1.\n",
    "        self.weights = np.random.randn(input_len, output_len) / np.sqrt(input_len)\n",
    "        self.biases = np.zeros(output_len)\n",
    "        self.activation = activation\n",
    "\n",
    "    def forward(self, input):\n",
    "        # Flatten, affine transform, then activation.\n",
    "        self.last_input_shape = input.shape\n",
    "        self.last_input = input.flatten()\n",
    "        self.last_output = np.dot(self.last_input, self.weights) + self.biases\n",
    "        return self.activate(self.last_output)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        if self.activation == 'softmax':\n",
    "            # Bug fix: the training loop already supplies the combined\n",
    "            # softmax + cross-entropy gradient (output - y); applying the\n",
    "            # elementwise softmax derivative again double-counted it.\n",
    "            d_L_d_preact = d_L_d_out\n",
    "        else:\n",
    "            d_L_d_preact = d_L_d_out * self.activate_derivative(self.last_output)\n",
    "        d_L_d_weights = np.dot(self.last_input[:, None], d_L_d_preact[None, :])\n",
    "        d_L_d_biases = d_L_d_preact\n",
    "        d_L_d_input = np.dot(d_L_d_preact, self.weights.T).reshape(self.last_input_shape)\n",
    "\n",
    "        # SGD step; L2 weight decay applies to the weights only.\n",
    "        self.weights -= learn_rate * (d_L_d_weights + reg_lambda * self.weights)\n",
    "        self.biases -= learn_rate * d_L_d_biases\n",
    "        return d_L_d_input\n",
    "\n",
    "    def activate(self, x):\n",
    "        if self.activation == 'relu':\n",
    "            return relu(x)\n",
    "        elif self.activation == 'sigmoid':\n",
    "            return sigmoid(x)\n",
    "        elif self.activation == 'tanh':\n",
    "            return tanh(x)\n",
    "        elif self.activation == 'softmax':\n",
    "            return softmax(x)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid activation function\")\n",
    "\n",
    "    def activate_derivative(self, x):\n",
    "        if self.activation == 'relu':\n",
    "            return relu_derivative(x)\n",
    "        elif self.activation == 'sigmoid':\n",
    "            return sigmoid_derivative(x)\n",
    "        elif self.activation == 'tanh':\n",
    "            return tanh_derivative(x)\n",
    "        elif self.activation == 'softmax':\n",
    "            return softmax_derivative(x)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid activation function\")\n",
    "\n",
    "# Batch normalization over the last (feature/channel) axis.\n",
    "class BatchNormLayer:\n",
    "    def __init__(self, num_features, momentum=0.9):\n",
    "        self.gamma = np.ones(num_features)\n",
    "        self.beta = np.zeros(num_features)\n",
    "        self.momentum = momentum\n",
    "        self.running_mean = np.zeros(num_features)\n",
    "        # Bug fix: initialize the running variance to 1, not 0. A zero\n",
    "        # start makes early inference normalize by ~sqrt(1e-8) and blow\n",
    "        # up the activations.\n",
    "        self.running_var = np.ones(num_features)\n",
    "        self.training = True\n",
    "        self.x_hat = None\n",
    "\n",
    "    def forward(self, input):\n",
    "        # Normalize each feature over all spatial positions of the sample.\n",
    "        self.last_input = input\n",
    "        self.last_input_shape = input.shape\n",
    "        input = input.reshape(-1, self.last_input_shape[-1])\n",
    "\n",
    "        if self.training:\n",
    "            self.mean = np.mean(input, axis=0)\n",
    "            self.variance = np.var(input, axis=0)\n",
    "            # Exponential moving averages used at inference time.\n",
    "            self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * self.mean\n",
    "            self.running_var = self.momentum * self.running_var + (1 - self.momentum) * self.variance\n",
    "        else:\n",
    "            self.mean = self.running_mean\n",
    "            self.variance = self.running_var\n",
    "\n",
    "        self.x_hat = (input - self.mean) / np.sqrt(self.variance + 1e-8)\n",
    "        self.y = self.gamma * self.x_hat + self.beta\n",
    "\n",
    "        return self.y.reshape(self.last_input_shape)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        if self.x_hat is None:\n",
    "            raise ValueError(\"x_hat is not initialized. Forward pass must be called before backward pass.\")\n",
    "\n",
    "        input = self.last_input.reshape(-1, self.last_input_shape[-1])\n",
    "        d_L_d_out = d_L_d_out.reshape(-1, self.last_input_shape[-1])\n",
    "        m = input.shape[0]\n",
    "\n",
    "        dbeta = np.sum(d_L_d_out, axis=0)\n",
    "        dgamma = np.sum(d_L_d_out * self.x_hat, axis=0)\n",
    "        dx_hat = d_L_d_out * self.gamma\n",
    "\n",
    "        # Standard batch-norm input gradient (compact closed form).\n",
    "        dinput = (1. / m) * (1. / np.sqrt(self.variance + 1e-8)) * (\n",
    "                m * dx_hat - np.sum(dx_hat, axis=0) - self.x_hat * np.sum(dx_hat * self.x_hat, axis=0))\n",
    "\n",
    "        self.gamma -= learn_rate * (dgamma + reg_lambda * self.gamma)\n",
    "        self.beta -= learn_rate * dbeta\n",
    "\n",
    "        return dinput.reshape(self.last_input_shape)\n",
    "\n",
    "    def set_training(self, training=True):\n",
    "        self.training = training\n",
    "\n",
    "# Inverted dropout layer.\n",
    "class DropoutLayer:\n",
    "    def __init__(self, rate):\n",
    "        self.rate = rate  # probability of dropping a unit\n",
    "        self.training = True\n",
    "\n",
    "    def forward(self, input):\n",
    "        if self.training:\n",
    "            # Scale kept units by 1/(1-rate) so eval needs no rescaling.\n",
    "            self.mask = (np.random.rand(*input.shape) > self.rate) / (1 - self.rate)\n",
    "            return input * self.mask\n",
    "        else:\n",
    "            return input\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate=None, reg_lambda=None):\n",
    "        # Bug fix: in eval mode forward is the identity, so backward must\n",
    "        # be too (self.mask may be undefined or stale from training).\n",
    "        if self.training:\n",
    "            return d_L_d_out * self.mask\n",
    "        return d_L_d_out\n",
    "\n",
    "    def set_training(self, training=True):\n",
    "        self.training = training\n",
    "\n",
    "# Gradient-descent optimizer supporting plain SGD and Adam updates.\n",
    "class Optimizer:\n",
    "    def __init__(self, method='sgd', lr=0.01, beta1=0.9, beta2=0.999, epsilon=1e-8):\n",
    "        self.method = method\n",
    "        self.lr = lr\n",
    "        self.beta1 = beta1\n",
    "        self.beta2 = beta2\n",
    "        self.epsilon = epsilon\n",
    "        self.m = None   # first-moment estimates (Adam)\n",
    "        self.v = None   # second-moment estimates (Adam)\n",
    "        self.t = 0      # Adam time step\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        # Returns new parameter values; the inputs are not modified.\n",
    "        if self.method == 'sgd':\n",
    "            return [p - self.lr * g for p, g in zip(params, grads)]\n",
    "        if self.method == 'adam':\n",
    "            if self.m is None:\n",
    "                self.m = [np.zeros_like(p) for p in params]\n",
    "                self.v = [np.zeros_like(p) for p in params]\n",
    "            self.t += 1\n",
    "            updated = []\n",
    "            for k, (p, g) in enumerate(zip(params, grads)):\n",
    "                self.m[k] = self.beta1 * self.m[k] + (1 - self.beta1) * g\n",
    "                self.v[k] = self.beta2 * self.v[k] + (1 - self.beta2) * (g ** 2)\n",
    "                m_hat = self.m[k] / (1 - self.beta1 ** self.t)\n",
    "                v_hat = self.v[k] / (1 - self.beta2 ** self.t)\n",
    "                updated.append(p - self.lr * m_hat / (np.sqrt(v_hat) + self.epsilon))\n",
    "            return updated\n",
    "        raise ValueError(\"Invalid optimization method\")\n",
    "\n",
    "# Sequential container of layers.\n",
    "class CNN:\n",
    "    def __init__(self, layers):\n",
    "        self.layers = layers\n",
    "\n",
    "    def forward(self, input):\n",
    "        # Pass the activation through each layer in order.\n",
    "        out = input\n",
    "        for layer in self.layers:\n",
    "            out = layer.forward(out)\n",
    "        return out\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        # Propagate the gradient through the layers in reverse order.\n",
    "        grad = d_L_d_out\n",
    "        for layer in reversed(self.layers):\n",
    "            grad = layer.backward(grad, learn_rate, reg_lambda)\n",
    "        return grad\n",
    "\n",
    "    def set_training(self, training=True):\n",
    "        # Only BatchNorm and Dropout behave differently at train vs. eval.\n",
    "        for layer in self.layers:\n",
    "            if isinstance(layer, (BatchNormLayer, DropoutLayer)):\n",
    "                layer.set_training(training)\n",
    "\n",
    "# Per-sample SGD training loop with cross-entropy loss.\n",
    "def train(model, X_train, y_train, epochs, optimizer, reg_lambda):\n",
    "    for epoch in range(epochs):\n",
    "        loss = 0\n",
    "        num_correct = 0\n",
    "        model.set_training(True)\n",
    "        for i in tqdm(range(len(X_train))):\n",
    "            x, y = X_train[i], y_train[i]\n",
    "            output = model.forward(x)\n",
    "\n",
    "            if np.argmax(output) == np.argmax(y):\n",
    "                num_correct += 1\n",
    "\n",
    "            # Clip the probability so -log never overflows to inf at p ~ 0.\n",
    "            loss += -np.log(np.clip(output[np.argmax(y)], 1e-12, 1.0))\n",
    "\n",
    "            # Combined softmax + cross-entropy gradient: output - y.\n",
    "            # Copy first so we do not mutate the array forward() returned.\n",
    "            grad = output.copy()\n",
    "            grad[np.argmax(y)] -= 1\n",
    "\n",
    "            model.backward(grad, optimizer.lr, reg_lambda)\n",
    "\n",
    "        print(f'Epoch {epoch + 1}, Loss: {loss}, Accuracy: {num_correct / len(X_train)}')\n",
    "\n",
    "# VGG-16-style architecture for 32x32x3 inputs with a 10-way softmax head.\n",
    "def create_vgg(input_shape):\n",
    "    # NOTE(review): input_shape is currently unused (the stack assumes\n",
    "    # 32x32x3), matching the original definition.\n",
    "    conv_plan = [[64, 64], [128, 128], [256, 256, 256], [512, 512, 512], [512, 512, 512]]\n",
    "    layers = []\n",
    "    in_depth = 3\n",
    "    for block in conv_plan:\n",
    "        for out_depth in block:\n",
    "            layers.append(ConvLayer(out_depth, 3, in_depth, stride=1, padding=1))\n",
    "            layers.append(BatchNormLayer(out_depth))\n",
    "            in_depth = out_depth\n",
    "        layers.append(MaxPoolLayer(2, 2))  # halves spatial resolution\n",
    "    # Classifier head: after five 2x2 pools a 32x32 input is 1x1x512.\n",
    "    layers += [\n",
    "        DenseLayer(512, 4096, activation='relu'),\n",
    "        DropoutLayer(0.5),\n",
    "        DenseLayer(4096, 4096, activation='relu'),\n",
    "        DropoutLayer(0.5),\n",
    "        DenseLayer(4096, 10, activation='softmax')\n",
    "    ]\n",
    "    return CNN(layers)\n",
    "\n",
    "# Build and train the VGG model. Note: layers update with optimizer.lr\n",
    "# directly (plain SGD); Optimizer.update (Adam) is never called here.\n",
    "model = create_vgg(x_train.shape[1:])\n",
    "optimizer = Optimizer(method='adam', lr=0.001)\n",
    "train(model, x_train, y_train, epochs=10, optimizer=optimizer, reg_lambda=0.001)\n",
    "\n",
    "# Evaluate classification accuracy on the held-out test set.\n",
    "def evaluate(model, X_test, y_test):\n",
    "    correct = 0\n",
    "    model.set_training(False)\n",
    "    for idx in range(len(X_test)):\n",
    "        sample, label = X_test[idx], y_test[idx]\n",
    "        prediction = model.forward(sample)\n",
    "        correct += int(np.argmax(prediction) == np.argmax(label))\n",
    "    accuracy = correct / len(X_test)\n",
    "    print(f'Test Accuracy: {accuracy * 100:.2f}%')\n",
    "\n",
    "evaluate(model, x_test, y_test)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "75aa0894-4099-4d59-93d1-04fee6e69b12",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  0%|                                                                            | 41/50000 [02:25<49:09:55,  3.54s/it]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[1], line 359\u001b[0m\n\u001b[0;32m    357\u001b[0m model \u001b[38;5;241m=\u001b[39m create_vgg(x_train\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m1\u001b[39m:])\n\u001b[0;32m    358\u001b[0m optimizer \u001b[38;5;241m=\u001b[39m Optimizer(method\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124madam\u001b[39m\u001b[38;5;124m'\u001b[39m, lr\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.0001\u001b[39m)\n\u001b[1;32m--> 359\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreg_lambda\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.001\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m    361\u001b[0m \u001b[38;5;66;03m# 评估模型\u001b[39;00m\n\u001b[0;32m    362\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mevaluate\u001b[39m(model, X_test, y_test):\n",
      "Cell \u001b[1;32mIn[1], line 305\u001b[0m, in \u001b[0;36mtrain\u001b[1;34m(model, X_train, y_train, epochs, optimizer, reg_lambda)\u001b[0m\n\u001b[0;32m    302\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m grad_norm \u001b[38;5;241m>\u001b[39m max_grad_norm:\n\u001b[0;32m    303\u001b[0m         grad \u001b[38;5;241m=\u001b[39m grad \u001b[38;5;241m*\u001b[39m (max_grad_norm \u001b[38;5;241m/\u001b[39m grad_norm)\n\u001b[1;32m--> 305\u001b[0m     \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mgrad\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mlr\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreg_lambda\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    307\u001b[0m \u001b[38;5;28mprint\u001b[39m(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124mEpoch \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mepoch\u001b[38;5;250m \u001b[39m\u001b[38;5;241m+\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;241m1\u001b[39m\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, Loss: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mloss\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m, Accuracy: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mnum_correct\u001b[38;5;250m \u001b[39m\u001b[38;5;241m/\u001b[39m\u001b[38;5;250m \u001b[39m\u001b[38;5;28mlen\u001b[39m(X_train)\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m'\u001b[39m)\n",
      "Cell \u001b[1;32mIn[1], line 274\u001b[0m, in \u001b[0;36mCNN.backward\u001b[1;34m(self, d_L_d_out, learn_rate, reg_lambda)\u001b[0m\n\u001b[0;32m    272\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mbackward\u001b[39m(\u001b[38;5;28mself\u001b[39m, d_L_d_out, learn_rate, reg_lambda):\n\u001b[0;32m    273\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m layer \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mreversed\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlayers):\n\u001b[1;32m--> 274\u001b[0m         d_L_d_out \u001b[38;5;241m=\u001b[39m \u001b[43mlayer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mbackward\u001b[49m\u001b[43m(\u001b[49m\u001b[43md_L_d_out\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mlearn_rate\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreg_lambda\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    275\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m d_L_d_out\n",
      "Cell \u001b[1;32mIn[1], line 77\u001b[0m, in \u001b[0;36mConvLayer.backward\u001b[1;34m(self, d_L_d_out, learn_rate, reg_lambda)\u001b[0m\n\u001b[0;32m     75\u001b[0m             region \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlast_padded_input[i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size, j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size]\n\u001b[0;32m     76\u001b[0m             d_L_d_filters[f] \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m d_L_d_out[i, j, f] \u001b[38;5;241m*\u001b[39m region\n\u001b[1;32m---> 77\u001b[0m             d_L_d_input[i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size, j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size] \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m d_L_d_out[i, j, f] \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilters[f]\n\u001b[0;32m     79\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilters \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m=\u001b[39m learn_rate 
\u001b[38;5;241m*\u001b[39m (d_L_d_filters \u001b[38;5;241m+\u001b[39m reg_lambda \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilters)\n\u001b[0;32m     80\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mpadding \u001b[38;5;241m>\u001b[39m \u001b[38;5;241m0\u001b[39m:\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.datasets import cifar10\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Download and load the CIFAR-10 dataset.\n",
    "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n",
    "# Scale pixels to [0, 1] and one-hot encode the 10 class labels.\n",
    "x_train = x_train.astype('float32') / 255.0\n",
    "x_test = x_test.astype('float32') / 255.0\n",
    "y_train = to_categorical(y_train, 10)\n",
    "y_test = to_categorical(y_test, 10)\n",
    "\n",
    "# Activation functions (elementwise via NumPy broadcasting).\n",
    "def relu(x):\n",
    "    return np.maximum(0, x)\n",
    "\n",
    "def relu_derivative(x):\n",
    "    # Boolean mask; acts as 0/1 under NumPy arithmetic.\n",
    "    return x > 0\n",
    "\n",
    "def sigmoid(x):\n",
    "    # NOTE(review): np.exp(-x) can overflow for large negative x — confirm inputs.\n",
    "    return 1 / (1 + np.exp(-x))\n",
    "\n",
    "def sigmoid_derivative(x):\n",
    "    # s(x) * (1 - s(x)), evaluated on the pre-activation x.\n",
    "    return sigmoid(x) * (1 - sigmoid(x))\n",
    "\n",
    "def tanh(x):\n",
    "    return np.tanh(x)\n",
    "\n",
    "def tanh_derivative(x):\n",
    "    return 1 - np.tanh(x) ** 2\n",
    "\n",
    "def softmax(x):\n",
    "    # Subtract max(x) for numerical stability before exponentiating.\n",
    "    exp_x = np.exp(x - np.max(x))\n",
    "    return exp_x / np.sum(exp_x)\n",
    "\n",
    "def softmax_derivative(x):\n",
    "    exp_x = np.exp(x - np.max(x))\n",
    "    return exp_x / np.sum(exp_x) * (1 - exp_x / np.sum(exp_x))\n",
    "\n",
    "# Convolution layer (single image, HWC layout) with a fused ReLU activation.\n",
    "class ConvLayer:\n",
    "    def __init__(self, num_filters, filter_size, input_depth, stride=1, padding=1):\n",
    "        self.num_filters = num_filters\n",
    "        self.filter_size = filter_size\n",
    "        self.input_depth = input_depth\n",
    "        self.stride = stride\n",
    "        self.padding = padding\n",
    "        # He initialization, appropriate for the ReLU non-linearity.\n",
    "        self.filters = np.random.randn(num_filters, filter_size, filter_size, input_depth) * np.sqrt(2 / (filter_size * filter_size * input_depth))\n",
    "\n",
    "    def forward(self, input):\n",
    "        \"\"\"Convolve `input` (h, w, d) with every filter and apply ReLU.\"\"\"\n",
    "        self.last_input = input\n",
    "        if self.padding > 0:\n",
    "            input = np.pad(input, [(self.padding, self.padding), (self.padding, self.padding), (0, 0)], mode='constant')\n",
    "        self.last_padded_input = input\n",
    "        h_p, w_p, _ = input.shape\n",
    "        out_h = (h_p - self.filter_size) // self.stride + 1\n",
    "        out_w = (w_p - self.filter_size) // self.stride + 1\n",
    "        output = np.zeros((out_h, out_w, self.num_filters))\n",
    "        for i in range(out_h):\n",
    "            for j in range(out_w):\n",
    "                region = input[i * self.stride:i * self.stride + self.filter_size, j * self.stride:j * self.stride + self.filter_size]\n",
    "                # One dot product per filter over the same receptive field.\n",
    "                output[i, j] = np.tensordot(region, self.filters, axes=([0, 1, 2], [1, 2, 3]))\n",
    "        # Keep the pre-activation so backward() can apply the ReLU derivative.\n",
    "        self.last_preact = output\n",
    "        return relu(output)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        \"\"\"Backprop through ReLU + convolution; updates filters with L2 decay.\n",
    "\n",
    "        Returns the gradient w.r.t. the (unpadded) layer input.\n",
    "        \"\"\"\n",
    "        # Bug fix: the original ignored the ReLU applied in forward(); gate the\n",
    "        # incoming gradient by the ReLU derivative of the pre-activation.\n",
    "        d_L_d_out = d_L_d_out * relu_derivative(self.last_preact)\n",
    "        d_L_d_filters = np.zeros(self.filters.shape)\n",
    "        d_L_d_input = np.zeros(self.last_padded_input.shape)\n",
    "        for i in range(d_L_d_out.shape[0]):\n",
    "            for j in range(d_L_d_out.shape[1]):\n",
    "                rs = slice(i * self.stride, i * self.stride + self.filter_size)\n",
    "                cs = slice(j * self.stride, j * self.stride + self.filter_size)\n",
    "                region = self.last_padded_input[rs, cs]\n",
    "                for f in range(self.num_filters):\n",
    "                    d_L_d_filters[f] += d_L_d_out[i, j, f] * region\n",
    "                    d_L_d_input[rs, cs] += d_L_d_out[i, j, f] * self.filters[f]\n",
    "\n",
    "        # SGD step with L2 regularization folded into the gradient.\n",
    "        self.filters -= learn_rate * (d_L_d_filters + reg_lambda * self.filters)\n",
    "        if self.padding > 0:\n",
    "            d_L_d_input = d_L_d_input[self.padding:-self.padding, self.padding:-self.padding, :]\n",
    "        return d_L_d_input\n",
    "\n",
    "# 定义池化层\n",
    "class MaxPoolLayer:\n",
    "    def __init__(self, size, stride):\n",
    "        self.size = size\n",
    "        self.stride = stride\n",
    "\n",
    "    def forward(self, input):\n",
    "        self.last_input = input\n",
    "        h, w, num_filters = input.shape\n",
    "        output_h = (h - self.size) // self.stride + 1\n",
    "        output_w = (w - self.size) // self.stride + 1\n",
    "        output = np.zeros((output_h, output_w, num_filters))\n",
    "        for i in range(output_h):\n",
    "            for j in range(output_w):\n",
    "                region = input[i*self.stride:i*self.stride + self.size, j*self.stride:j*self.stride + self.size]\n",
    "                output[i, j] = np.max(region, axis=(0, 1))\n",
    "        return output\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate=None, reg_lambda=None):\n",
    "        d_L_d_input = np.zeros(self.last_input.shape)\n",
    "        for i in range(d_L_d_out.shape[0]):\n",
    "            for j in range(d_L_d_out.shape[1]):\n",
    "                for f in range(d_L_d_out.shape[2]):\n",
    "                    region = self.last_input[i*self.stride:i*self.stride + self.size, j*self.stride:j*self.stride + self.size, f]\n",
    "                    max_val = np.max(region)\n",
    "                    for i_region in range(region.shape[0]):\n",
    "                        for j_region in range(region.shape[1]):\n",
    "                            if region[i_region, j_region] == max_val:\n",
    "                                d_L_d_input[i*self.stride + i_region, j*self.stride + j_region, f] = d_L_d_out[i, j, f]\n",
    "        return d_L_d_input\n",
    "\n",
    "# Fully-connected layer with a selectable activation.\n",
    "class DenseLayer:\n",
    "    def __init__(self, input_len, output_len, activation='relu'):\n",
    "        # He initialization scaled by fan-in; biases start at zero.\n",
    "        self.weights = np.random.randn(input_len, output_len) * np.sqrt(2 / input_len)\n",
    "        self.biases = np.zeros(output_len)\n",
    "        self.activation = activation\n",
    "\n",
    "    def forward(self, input):\n",
    "        # Flatten whatever shape arrives (e.g. conv feature maps) to a vector;\n",
    "        # the original shape is remembered so backward() can restore it.\n",
    "        self.last_input_shape = input.shape\n",
    "        self.last_input = input.flatten()\n",
    "        self.last_output = np.dot(self.last_input, self.weights) + self.biases\n",
    "        return self.activate(self.last_output)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        # Chain rule through the activation (evaluated on the pre-activation),\n",
    "        # then through the affine transform; weight gradient is an outer product.\n",
    "        d_L_d_input = d_L_d_out * self.activate_derivative(self.last_output)\n",
    "        d_L_d_weights = np.dot(self.last_input[:, None], d_L_d_input[None, :])\n",
    "        d_L_d_biases = d_L_d_input\n",
    "        d_L_d_input = np.dot(d_L_d_input, self.weights.T).reshape(self.last_input_shape)\n",
    "\n",
    "        # SGD step with L2 weight decay (biases are not regularized).\n",
    "        self.weights -= learn_rate * (d_L_d_weights + reg_lambda * self.weights)\n",
    "        self.biases -= learn_rate * d_L_d_biases\n",
    "        return d_L_d_input\n",
    "\n",
    "    def activate(self, x):\n",
    "        # Dispatch on the configured activation name.\n",
    "        if self.activation == 'relu':\n",
    "            return relu(x)\n",
    "        elif self.activation == 'sigmoid':\n",
    "            return sigmoid(x)\n",
    "        elif self.activation == 'tanh':\n",
    "            return tanh(x)\n",
    "        elif self.activation == 'softmax':\n",
    "            return softmax(x)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid activation function\")\n",
    "\n",
    "    def activate_derivative(self, x):\n",
    "        if self.activation == 'relu':\n",
    "            return relu_derivative(x)\n",
    "        elif self.activation == 'sigmoid':\n",
    "            return sigmoid_derivative(x)\n",
    "        elif self.activation == 'tanh':\n",
    "            return tanh_derivative(x)\n",
    "        elif self.activation == 'softmax':\n",
    "            return softmax_derivative(x)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid activation function\")\n",
    "\n",
    "# 定义批量归一化层\n",
    "class BatchNormLayer:\n",
    "    def __init__(self, num_features, momentum=0.9):\n",
    "        self.gamma = np.ones(num_features)\n",
    "        self.beta = np.zeros(num_features)\n",
    "        self.momentum = momentum\n",
    "        self.running_mean = np.zeros(num_features)\n",
    "        self.running_var = np.zeros(num_features)\n",
    "        self.training = True\n",
    "        self.x_hat = None\n",
    "\n",
    "    def forward(self, input):\n",
    "        self.last_input = input\n",
    "        self.last_input_shape = input.shape\n",
    "        input = input.reshape(-1, self.last_input_shape[-1])\n",
    "\n",
    "        if self.training:\n",
    "            self.mean = np.mean(input, axis=0)\n",
    "            self.variance = np.var(input, axis=0)\n",
    "            self.running_mean = self.momentum * self.running_mean + (1 - self.momentum) * self.mean\n",
    "            self.running_var = self.momentum * self.running_var + (1 - self.momentum) * self.variance\n",
    "        else:\n",
    "            self.mean = self.running_mean\n",
    "            self.variance = self.running_var\n",
    "\n",
    "        self.x_hat = (input - self.mean) / np.sqrt(self.variance + 1e-8)\n",
    "        self.y = self.gamma * self.x_hat + self.beta\n",
    "\n",
    "        return self.y.reshape(self.last_input_shape)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        if self.x_hat is None:\n",
    "            raise ValueError(\"x_hat is not initialized. Forward pass must be called before backward pass.\")\n",
    "        \n",
    "        input = self.last_input.reshape(-1, self.last_input_shape[-1])\n",
    "        d_L_d_out = d_L_d_out.reshape(-1, self.last_input_shape[-1])\n",
    "        m = input.shape[0]\n",
    "\n",
    "        dbeta = np.sum(d_L_d_out, axis=0)\n",
    "        dgamma = np.sum(d_L_d_out * self.x_hat, axis=0)\n",
    "        dx_hat = d_L_d_out * self.gamma\n",
    "\n",
    "        dinput = (1. / m) * (1. / np.sqrt(self.variance + 1e-8)) * (\n",
    "                m * dx_hat - np.sum(dx_hat, axis=0) - self.x_hat * np.sum(dx_hat * self.x_hat, axis=0))\n",
    "\n",
    "        self.gamma -= learn_rate * (dgamma + reg_lambda * self.gamma)\n",
    "        self.beta -= learn_rate * dbeta\n",
    "\n",
    "        return dinput.reshape(self.last_input_shape)\n",
    "\n",
    "    def set_training(self, training=True):\n",
    "        self.training = training\n",
    "\n",
    "# Inverted dropout: kept units are pre-scaled by 1/(1-rate) at train time so\n",
    "# no rescaling is needed at inference.\n",
    "class DropoutLayer:\n",
    "    def __init__(self, rate):\n",
    "        self.rate = rate  # probability of dropping a unit\n",
    "        self.training = True\n",
    "\n",
    "    def forward(self, input):\n",
    "        if self.training:\n",
    "            # Bernoulli keep-mask, pre-scaled (inverted dropout).\n",
    "            self.mask = (np.random.rand(*input.shape) > self.rate) / (1 - self.rate)\n",
    "            return input * self.mask\n",
    "        else:\n",
    "            return input\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate=None, reg_lambda=None):\n",
    "        # NOTE(review): assumes forward() ran in training mode at least once;\n",
    "        # otherwise self.mask is unset — confirm callers always train first.\n",
    "        return d_L_d_out * self.mask\n",
    "\n",
    "    def set_training(self, training=True):\n",
    "        self.training = training\n",
    "\n",
    "# Gradient-descent optimizer supporting plain SGD and Adam.\n",
    "# NOTE(review): the training loop below only reads optimizer.lr and applies raw\n",
    "# SGD inside each layer's backward(); update() is never called — confirm whether\n",
    "# Adam was meant to be wired in.\n",
    "class Optimizer:\n",
    "    def __init__(self, method='sgd', lr=0.01, beta1=0.9, beta2=0.999, epsilon=1e-8):\n",
    "        self.method = method\n",
    "        self.lr = lr\n",
    "        self.beta1 = beta1\n",
    "        self.beta2 = beta2\n",
    "        self.epsilon = epsilon\n",
    "        self.m = None  # first-moment estimates (Adam), lazily initialized\n",
    "        self.v = None  # second-moment estimates (Adam)\n",
    "        self.t = 0     # Adam time step for bias correction\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        \"\"\"Return updated copies of `params` given matching `grads`.\"\"\"\n",
    "        if self.method == 'sgd':\n",
    "            return [p - self.lr * g for p, g in zip(params, grads)]\n",
    "        elif self.method == 'adam':\n",
    "            if self.m is None:\n",
    "                self.m = [np.zeros_like(p) for p in params]\n",
    "                self.v = [np.zeros_like(p) for p in params]\n",
    "            self.t += 1\n",
    "            self.m = [self.beta1 * m + (1 - self.beta1) * g for m, g in zip(self.m, grads)]\n",
    "            self.v = [self.beta2 * v + (1 - self.beta2) * (g ** 2) for v, g in zip(self.v, grads)]\n",
    "            # Bias-corrected moment estimates (Kingma & Ba, 2015).\n",
    "            m_hat = [m / (1 - self.beta1 ** self.t) for m in self.m]\n",
    "            v_hat = [v / (1 - self.beta2 ** self.t) for v in self.v]\n",
    "            return [p - self.lr * m / (np.sqrt(v) + self.epsilon) for p, m, v in zip(params, m_hat, v_hat)]\n",
    "        else:\n",
    "            raise ValueError(\"Invalid optimization method\")\n",
    "\n",
    "# 定义通用的CNN模型\n",
    "class CNN:\n",
    "    def __init__(self, layers):\n",
    "        self.layers = layers\n",
    "\n",
    "    def forward(self, input):\n",
    "        for layer in self.layers:\n",
    "            input = layer.forward(input)\n",
    "        return input\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        for layer in reversed(self.layers):\n",
    "            d_L_d_out = layer.backward(d_L_d_out, learn_rate, reg_lambda)\n",
    "        return d_L_d_out\n",
    "\n",
    "    def set_training(self, training=True):\n",
    "        for layer in self.layers:\n",
    "            if isinstance(layer, BatchNormLayer) or isinstance(layer, DropoutLayer):\n",
    "                layer.set_training(training)\n",
    "\n",
    "# Training loop: per-sample SGD with gradient-norm clipping.\n",
    "def train(model, X_train, y_train, epochs, optimizer, reg_lambda):\n",
    "    \"\"\"Train `model` for `epochs` passes over (X_train, y_train).\n",
    "\n",
    "    Uses the fused softmax + cross-entropy gradient (p - y) w.r.t. the logits\n",
    "    and clips its L2 norm. Note: only optimizer.lr is read; the updates are\n",
    "    plain SGD performed inside each layer's backward().\n",
    "    \"\"\"\n",
    "    max_grad_norm = 5.0\n",
    "    eps = 1e-12  # guards log(0) when the true class gets probability ~0\n",
    "    for epoch in range(epochs):\n",
    "        loss = 0\n",
    "        num_correct = 0\n",
    "        model.set_training(True)\n",
    "        for i in tqdm(range(len(X_train))):\n",
    "            x, y = X_train[i], y_train[i]\n",
    "            output = model.forward(x)\n",
    "\n",
    "            label = np.argmax(y)\n",
    "            if np.argmax(output) == label:\n",
    "                num_correct += 1\n",
    "\n",
    "            # Cross-entropy loss of the true class (eps avoids -log(0) = inf).\n",
    "            loss += -np.log(output[label] + eps)\n",
    "\n",
    "            # Bug fix: copy before mutating — `grad = output` aliased the\n",
    "            # prediction array, so the in-place subtraction corrupted `output`.\n",
    "            grad = output.copy()\n",
    "            grad[label] -= 1\n",
    "\n",
    "            # Clip the gradient norm to stabilize the early iterations.\n",
    "            grad_norm = np.linalg.norm(grad)\n",
    "            if grad_norm > max_grad_norm:\n",
    "                grad = grad * (max_grad_norm / grad_norm)\n",
    "\n",
    "            model.backward(grad, optimizer.lr, reg_lambda)\n",
    "\n",
    "        print(f'Epoch {epoch + 1}, Loss: {loss}, Accuracy: {num_correct / len(X_train)}')\n",
    "\n",
    "# VGG-16-style architecture for CIFAR-10 (32x32x3 inputs).\n",
    "# NOTE(review): `input_shape` is unused; five 2x2 pools take 32 -> 1, so the\n",
    "# FC6 input size 512 (= 1*1*512) is hard-coded for 32x32 inputs — confirm\n",
    "# before reusing on other data.\n",
    "def create_vgg(input_shape):\n",
    "    layers = [\n",
    "        ConvLayer(64, 3, 3, stride=1, padding=1),   # Conv1_1\n",
    "        BatchNormLayer(64),    # BN1_1\n",
    "        ConvLayer(64, 3, 64, stride=1, padding=1),  # Conv1_2\n",
    "        BatchNormLayer(64),    # BN1_2\n",
    "        MaxPoolLayer(2, 2),       # Pool1\n",
    "        \n",
    "        ConvLayer(128, 3, 64, stride=1, padding=1), # Conv2_1\n",
    "        BatchNormLayer(128),   # BN2_1\n",
    "        ConvLayer(128, 3, 128, stride=1, padding=1),# Conv2_2\n",
    "        BatchNormLayer(128),   # BN2_2\n",
    "        MaxPoolLayer(2, 2),       # Pool2\n",
    "        \n",
    "        ConvLayer(256, 3, 128, stride=1, padding=1),# Conv3_1\n",
    "        BatchNormLayer(256),   # BN3_1\n",
    "        ConvLayer(256, 3, 256, stride=1, padding=1),# Conv3_2\n",
    "        BatchNormLayer(256),   # BN3_2\n",
    "        ConvLayer(256, 3, 256, stride=1, padding=1),# Conv3_3\n",
    "        BatchNormLayer(256),   # BN3_3\n",
    "        MaxPoolLayer(2, 2),       # Pool3\n",
    "        \n",
    "        ConvLayer(512, 3, 256, stride=1, padding=1),# Conv4_1\n",
    "        BatchNormLayer(512),   # BN4_1\n",
    "        ConvLayer(512, 3, 512, stride=1, padding=1),# Conv4_2\n",
    "        BatchNormLayer(512),   # BN4_2\n",
    "        ConvLayer(512, 3, 512, stride=1, padding=1),# Conv4_3\n",
    "        BatchNormLayer(512),   # BN4_3\n",
    "        MaxPoolLayer(2, 2),       # Pool4\n",
    "        \n",
    "        ConvLayer(512, 3, 512, stride=1, padding=1),# Conv5_1\n",
    "        BatchNormLayer(512),   # BN5_1\n",
    "        ConvLayer(512, 3, 512, stride=1, padding=1),# Conv5_2\n",
    "        BatchNormLayer(512),   # BN5_2\n",
    "        ConvLayer(512, 3, 512, stride=1, padding=1),# Conv5_3\n",
    "        BatchNormLayer(512),   # BN5_3\n",
    "        MaxPoolLayer(2, 2),       # Pool5\n",
    "        \n",
    "        DenseLayer(512, 4096, activation='relu'),  # FC6\n",
    "        DropoutLayer(0.5),     # Dropout1\n",
    "        DenseLayer(4096, 4096, activation='relu'),         # FC7\n",
    "        DropoutLayer(0.5),     # Dropout2\n",
    "        DenseLayer(4096, 10, activation='softmax')         # FC8\n",
    "    ]\n",
    "    return CNN(layers)\n",
    "\n",
    "# Instantiate and train the VGG model.\n",
    "# NOTE(review): this pure-NumPy VGG runs at seconds per image (see saved logs),\n",
    "# so a full epoch over 50k images is impractical — prefer the smaller CNN below.\n",
    "model = create_vgg(x_train.shape[1:])\n",
    "optimizer = Optimizer(method='adam', lr=0.0001)\n",
    "train(model, x_train, y_train, epochs=10, optimizer=optimizer, reg_lambda=0.001)\n",
    "\n",
    "# Evaluate the model on the held-out test set.\n",
    "def evaluate(model, X_test, y_test):\n",
    "    \"\"\"Print top-1 accuracy of `model` on (X_test, y_test) in eval mode.\"\"\"\n",
    "    correct = 0\n",
    "    model.set_training(False)  # use running BN statistics, disable dropout\n",
    "    for i in range(len(X_test)):\n",
    "        x, y = X_test[i], y_test[i]\n",
    "        output = model.forward(x)\n",
    "        if np.argmax(output) == np.argmax(y):\n",
    "            correct += 1\n",
    "    accuracy = correct / len(X_test)\n",
    "    print(f'Test Accuracy: {accuracy * 100:.2f}%')\n",
    "\n",
    "evaluate(model, x_test, y_test)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "23fa6cf2-f5c9-40a2-b7dc-194645d18ccf",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "  2%|█▍                                                                          | 948/50000 [07:24<6:23:20,  2.13it/s]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[2], line 232\u001b[0m\n\u001b[0;32m    230\u001b[0m model \u001b[38;5;241m=\u001b[39m create_simple_cnn(x_train\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m1\u001b[39m:])\n\u001b[0;32m    231\u001b[0m optimizer \u001b[38;5;241m=\u001b[39m Optimizer(method\u001b[38;5;241m=\u001b[39m\u001b[38;5;124m'\u001b[39m\u001b[38;5;124madam\u001b[39m\u001b[38;5;124m'\u001b[39m, lr\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m0.0001\u001b[39m)\n\u001b[1;32m--> 232\u001b[0m \u001b[43mtrain\u001b[49m\u001b[43m(\u001b[49m\u001b[43mmodel\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mx_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43my_train\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mepochs\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m10\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43moptimizer\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43moptimizer\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mreg_lambda\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[38;5;241;43m0.001\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m    234\u001b[0m \u001b[38;5;66;03m# 评估模型\u001b[39;00m\n\u001b[0;32m    235\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mevaluate\u001b[39m(model, X_test, y_test):\n",
      "Cell \u001b[1;32mIn[2], line 197\u001b[0m, in \u001b[0;36mtrain\u001b[1;34m(model, X_train, y_train, epochs, optimizer, reg_lambda)\u001b[0m\n\u001b[0;32m    195\u001b[0m \u001b[38;5;28;01mfor\u001b[39;00m i \u001b[38;5;129;01min\u001b[39;00m tqdm(\u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mlen\u001b[39m(X_train))):\n\u001b[0;32m    196\u001b[0m     x, y \u001b[38;5;241m=\u001b[39m X_train[i], y_train[i]\n\u001b[1;32m--> 197\u001b[0m     output \u001b[38;5;241m=\u001b[39m \u001b[43mmodel\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mx\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m    199\u001b[0m     \u001b[38;5;28;01mif\u001b[39;00m np\u001b[38;5;241m.\u001b[39margmax(output) \u001b[38;5;241m==\u001b[39m np\u001b[38;5;241m.\u001b[39margmax(y):\n\u001b[0;32m    200\u001b[0m         num_correct \u001b[38;5;241m+\u001b[39m\u001b[38;5;241m=\u001b[39m \u001b[38;5;241m1\u001b[39m\n",
      "Cell \u001b[1;32mIn[2], line 177\u001b[0m, in \u001b[0;36mCNN.forward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m    175\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mforward\u001b[39m(\u001b[38;5;28mself\u001b[39m, \u001b[38;5;28minput\u001b[39m):\n\u001b[0;32m    176\u001b[0m     \u001b[38;5;28;01mfor\u001b[39;00m layer \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mlayers:\n\u001b[1;32m--> 177\u001b[0m         \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m \u001b[43mlayer\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mforward\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43minput\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[0;32m    178\u001b[0m     \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28minput\u001b[39m\n",
      "Cell \u001b[1;32mIn[2], line 53\u001b[0m, in \u001b[0;36mConvLayer.forward\u001b[1;34m(self, input)\u001b[0m\n\u001b[0;32m     51\u001b[0m         \u001b[38;5;28;01mfor\u001b[39;00m f \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mrange\u001b[39m(\u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mnum_filters):\n\u001b[0;32m     52\u001b[0m             region \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m[i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:i \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size, j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride:j \u001b[38;5;241m*\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mstride \u001b[38;5;241m+\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mfilter_size]\n\u001b[1;32m---> 53\u001b[0m             output[i, j, f] \u001b[38;5;241m=\u001b[39m \u001b[43mnp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43msum\u001b[49m\u001b[43m(\u001b[49m\u001b[43mregion\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfilters\u001b[49m\u001b[43m[\u001b[49m\u001b[43mf\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     54\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m relu(output)\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.datasets import cifar10\n",
    "from tensorflow.keras.utils import to_categorical\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Download and load the CIFAR-10 dataset.\n",
    "(x_train, y_train), (x_test, y_test) = cifar10.load_data()\n",
    "# Scale pixels to [0, 1] and one-hot encode the 10 class labels.\n",
    "x_train = x_train.astype('float32') / 255.0\n",
    "x_test = x_test.astype('float32') / 255.0\n",
    "y_train = to_categorical(y_train, 10)\n",
    "y_test = to_categorical(y_test, 10)\n",
    "\n",
    "# Activation functions (elementwise).\n",
    "def relu(x):\n",
    "    return np.maximum(0, x)\n",
    "\n",
    "def relu_derivative(x):\n",
    "    # Boolean mask; acts as 0/1 under NumPy arithmetic.\n",
    "    return x > 0\n",
    "\n",
    "def softmax(x):\n",
    "    # Subtract max(x) for numerical stability before exponentiating.\n",
    "    exp_x = np.exp(x - np.max(x))\n",
    "    return exp_x / np.sum(exp_x)\n",
    "\n",
    "def softmax_derivative(x):\n",
    "    exp_x = np.exp(x - np.max(x))\n",
    "    return exp_x / np.sum(exp_x) * (1 - exp_x / np.sum(exp_x))\n",
    "\n",
    "# Convolution layer (single image, HWC layout) with a fused ReLU activation.\n",
    "class ConvLayer:\n",
    "    def __init__(self, num_filters, filter_size, input_depth, stride=1, padding=1):\n",
    "        self.num_filters = num_filters\n",
    "        self.filter_size = filter_size\n",
    "        self.input_depth = input_depth\n",
    "        self.stride = stride\n",
    "        self.padding = padding\n",
    "        # He initialization, appropriate for the ReLU non-linearity.\n",
    "        self.filters = np.random.randn(num_filters, filter_size, filter_size, input_depth) * np.sqrt(2 / (filter_size * filter_size * input_depth))\n",
    "\n",
    "    def forward(self, input):\n",
    "        \"\"\"Convolve `input` (h, w, d) with every filter and apply ReLU.\"\"\"\n",
    "        self.last_input = input\n",
    "        if self.padding > 0:\n",
    "            input = np.pad(input, [(self.padding, self.padding), (self.padding, self.padding), (0, 0)], mode='constant')\n",
    "        self.last_padded_input = input\n",
    "        h_p, w_p, _ = input.shape\n",
    "        out_h = (h_p - self.filter_size) // self.stride + 1\n",
    "        out_w = (w_p - self.filter_size) // self.stride + 1\n",
    "        output = np.zeros((out_h, out_w, self.num_filters))\n",
    "        for i in range(out_h):\n",
    "            for j in range(out_w):\n",
    "                region = input[i * self.stride:i * self.stride + self.filter_size, j * self.stride:j * self.stride + self.filter_size]\n",
    "                # One dot product per filter over the same receptive field.\n",
    "                output[i, j] = np.tensordot(region, self.filters, axes=([0, 1, 2], [1, 2, 3]))\n",
    "        # Keep the pre-activation so backward() can apply the ReLU derivative.\n",
    "        self.last_preact = output\n",
    "        return relu(output)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        \"\"\"Backprop through ReLU + convolution; updates filters with L2 decay.\n",
    "\n",
    "        Returns the gradient w.r.t. the (unpadded) layer input.\n",
    "        \"\"\"\n",
    "        # Bug fix: the original ignored the ReLU applied in forward(); gate the\n",
    "        # incoming gradient by the ReLU derivative of the pre-activation.\n",
    "        d_L_d_out = d_L_d_out * relu_derivative(self.last_preact)\n",
    "        d_L_d_filters = np.zeros(self.filters.shape)\n",
    "        d_L_d_input = np.zeros(self.last_padded_input.shape)\n",
    "        for i in range(d_L_d_out.shape[0]):\n",
    "            for j in range(d_L_d_out.shape[1]):\n",
    "                rs = slice(i * self.stride, i * self.stride + self.filter_size)\n",
    "                cs = slice(j * self.stride, j * self.stride + self.filter_size)\n",
    "                region = self.last_padded_input[rs, cs]\n",
    "                for f in range(self.num_filters):\n",
    "                    d_L_d_filters[f] += d_L_d_out[i, j, f] * region\n",
    "                    d_L_d_input[rs, cs] += d_L_d_out[i, j, f] * self.filters[f]\n",
    "\n",
    "        # SGD step with L2 regularization folded into the gradient.\n",
    "        self.filters -= learn_rate * (d_L_d_filters + reg_lambda * self.filters)\n",
    "        if self.padding > 0:\n",
    "            d_L_d_input = d_L_d_input[self.padding:-self.padding, self.padding:-self.padding, :]\n",
    "        return d_L_d_input\n",
    "\n",
    "# 定义池化层\n",
    "class MaxPoolLayer:\n",
    "    def __init__(self, size, stride):\n",
    "        self.size = size\n",
    "        self.stride = stride\n",
    "\n",
    "    def forward(self, input):\n",
    "        self.last_input = input\n",
    "        h, w, num_filters = input.shape\n",
    "        output_h = (h - self.size) // self.stride + 1\n",
    "        output_w = (w - self.size) // self.stride + 1\n",
    "        output = np.zeros((output_h, output_w, num_filters))\n",
    "        for i in range(output_h):\n",
    "            for j in range(output_w):\n",
    "                region = input[i*self.stride:i*self.stride + self.size, j*self.stride:j*self.stride + self.size]\n",
    "                output[i, j] = np.max(region, axis=(0, 1))\n",
    "        return output\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate=None, reg_lambda=None):\n",
    "        d_L_d_input = np.zeros(self.last_input.shape)\n",
    "        for i in range(d_L_d_out.shape[0]):\n",
    "            for j in range(d_L_d_out.shape[1]):\n",
    "                for f in range(d_L_d_out.shape[2]):\n",
    "                    region = self.last_input[i*self.stride:i*self.stride + self.size, j*self.stride:j*self.stride + self.size, f]\n",
    "                    max_val = np.max(region)\n",
    "                    for i_region in range(region.shape[0]):\n",
    "                        for j_region in range(region.shape[1]):\n",
    "                            if region[i_region, j_region] == max_val:\n",
    "                                d_L_d_input[i*self.stride + i_region, j*self.stride + j_region, f] = d_L_d_out[i, j, f]\n",
    "        return d_L_d_input\n",
    "\n",
    "# Fully-connected layer with a selectable activation.\n",
    "class DenseLayer:\n",
    "    def __init__(self, input_len, output_len, activation='relu'):\n",
    "        # He initialization scaled by fan-in; biases start at zero.\n",
    "        self.weights = np.random.randn(input_len, output_len) * np.sqrt(2 / input_len)\n",
    "        self.biases = np.zeros(output_len)\n",
    "        self.activation = activation\n",
    "\n",
    "    def forward(self, input):\n",
    "        # Flatten whatever shape arrives (e.g. conv feature maps) to a vector;\n",
    "        # the original shape is remembered so backward() can restore it.\n",
    "        self.last_input_shape = input.shape\n",
    "        self.last_input = input.flatten()\n",
    "        self.last_output = np.dot(self.last_input, self.weights) + self.biases\n",
    "        return self.activate(self.last_output)\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        # Chain rule through the activation, then the affine transform;\n",
    "        # weight gradient is an outer product.\n",
    "        d_L_d_input = d_L_d_out * self.activate_derivative(self.last_output)\n",
    "        d_L_d_weights = np.dot(self.last_input[:, None], d_L_d_input[None, :])\n",
    "        d_L_d_biases = d_L_d_input\n",
    "        d_L_d_input = np.dot(d_L_d_input, self.weights.T).reshape(self.last_input_shape)\n",
    "\n",
    "        # SGD step with L2 weight decay (biases are not regularized).\n",
    "        self.weights -= learn_rate * (d_L_d_weights + reg_lambda * self.weights)\n",
    "        self.biases -= learn_rate * d_L_d_biases\n",
    "        return d_L_d_input\n",
    "\n",
    "    def activate(self, x):\n",
    "        # Dispatch on the configured activation name.\n",
    "        if self.activation == 'relu':\n",
    "            return relu(x)\n",
    "        elif self.activation == 'softmax':\n",
    "            return softmax(x)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid activation function\")\n",
    "\n",
    "    def activate_derivative(self, x):\n",
    "        if self.activation == 'relu':\n",
    "            return relu_derivative(x)\n",
    "        elif self.activation == 'softmax':\n",
    "            return softmax_derivative(x)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid activation function\")\n",
    "\n",
    "# Gradient-descent optimizer supporting plain SGD and Adam.\n",
    "# NOTE(review): the training loop only reads optimizer.lr and applies raw SGD\n",
    "# inside each layer's backward(); update() is never called.\n",
    "class Optimizer:\n",
    "    def __init__(self, method='sgd', lr=0.01, beta1=0.9, beta2=0.999, epsilon=1e-8):\n",
    "        self.method = method\n",
    "        self.lr = lr\n",
    "        self.beta1 = beta1\n",
    "        self.beta2 = beta2\n",
    "        self.epsilon = epsilon\n",
    "        self.m = None  # first-moment estimates (Adam), lazily initialized\n",
    "        self.v = None  # second-moment estimates (Adam)\n",
    "        self.t = 0     # Adam time step for bias correction\n",
    "\n",
    "    def update(self, params, grads):\n",
    "        \"\"\"Return updated copies of `params` given matching `grads`.\"\"\"\n",
    "        if self.method == 'sgd':\n",
    "            return [p - self.lr * g for p, g in zip(params, grads)]\n",
    "        elif self.method == 'adam':\n",
    "            if self.m is None:\n",
    "                self.m = [np.zeros_like(p) for p in params]\n",
    "                self.v = [np.zeros_like(p) for p in params]\n",
    "            self.t += 1\n",
    "            self.m = [self.beta1 * m + (1 - self.beta1) * g for m, g in zip(self.m, grads)]\n",
    "            self.v = [self.beta2 * v + (1 - self.beta2) * (g ** 2) for v, g in zip(self.v, grads)]\n",
    "            # Bias-corrected moment estimates (Kingma & Ba, 2015).\n",
    "            m_hat = [m / (1 - self.beta1 ** self.t) for m in self.m]\n",
    "            v_hat = [v / (1 - self.beta2 ** self.t) for v in self.v]\n",
    "            return [p - self.lr * m / (np.sqrt(v) + self.epsilon) for p, m, v in zip(params, m_hat, v_hat)]\n",
    "        else:\n",
    "            raise ValueError(\"Invalid optimization method\")\n",
    "\n",
    "# 定义通用的CNN模型\n",
    "class CNN:\n",
    "    def __init__(self, layers):\n",
    "        self.layers = layers\n",
    "\n",
    "    def forward(self, input):\n",
    "        for layer in self.layers:\n",
    "            input = layer.forward(input)\n",
    "        return input\n",
    "\n",
    "    def backward(self, d_L_d_out, learn_rate, reg_lambda):\n",
    "        for layer in reversed(self.layers):\n",
    "            d_L_d_out = layer.backward(d_L_d_out, learn_rate, reg_lambda)\n",
    "        return d_L_d_out\n",
    "\n",
    "    def set_training(self, training=True):\n",
    "        pass\n",
    "\n",
    "# Training loop: per-sample forward/backward with cross-entropy loss\n",
    "def train(model, X_train, y_train, epochs, optimizer, reg_lambda):\n",
    "    \"\"\"Train `model` on (X_train, y_train) for `epochs` passes.\n",
    "\n",
    "    Assumes model.forward returns a probability vector (softmax output) and\n",
    "    y_train rows are one-hot labels. Prints total loss and accuracy per epoch.\n",
    "    NOTE(review): only optimizer.lr is read here -- optimizer.update() is never\n",
    "    called, so method='adam' has no effect; layers apply their own updates\n",
    "    inside backward(). Confirm this is intended.\n",
    "    \"\"\"\n",
    "    max_grad_norm = 5.0  # global-norm clip threshold for the output gradient\n",
    "    eps = 1e-12          # keeps log() finite when a probability underflows to 0\n",
    "    for epoch in range(epochs):\n",
    "        loss = 0\n",
    "        num_correct = 0\n",
    "        model.set_training(True)\n",
    "        for i in tqdm(range(len(X_train))):\n",
    "            x, y = X_train[i], y_train[i]\n",
    "            output = model.forward(x)\n",
    "            label = np.argmax(y)\n",
    "\n",
    "            if np.argmax(output) == label:\n",
    "                num_correct += 1\n",
    "\n",
    "            # Clamp the predicted probability so the loss cannot become inf/NaN\n",
    "            # (the recorded run shows overflow/invalid-value RuntimeWarnings).\n",
    "            loss += -np.log(max(output[label], eps))\n",
    "\n",
    "            # Softmax + cross-entropy gradient is (p - y). Copy first:\n",
    "            # `grad = output` would alias the forward output and mutate it\n",
    "            # in place on the next line.\n",
    "            grad = np.array(output, copy=True)\n",
    "            grad[label] -= 1\n",
    "\n",
    "            grad_norm = np.linalg.norm(grad)\n",
    "            if grad_norm > max_grad_norm:\n",
    "                grad = grad * (max_grad_norm / grad_norm)\n",
    "\n",
    "            model.backward(grad, optimizer.lr, reg_lambda)\n",
    "\n",
    "        print(f'Epoch {epoch + 1}, Loss: {loss}, Accuracy: {num_correct / len(X_train)}')\n",
    "\n",
    "# Build a small CNN: two conv/pool stages plus a two-layer classifier\n",
    "def create_simple_cnn(input_shape):\n",
    "    \"\"\"Assemble the baseline CNN and wrap it in a CNN container.\n",
    "\n",
    "    NOTE(review): `input_shape` is accepted but unused -- the sizes below are\n",
    "    hard-coded (8*8*64 flattened features, 10 classes); confirm they match the\n",
    "    dataset before changing input dimensions.\n",
    "    \"\"\"\n",
    "    feature_extractor = [\n",
    "        ConvLayer(32, 3, 3, stride=1, padding=1),   # Conv1\n",
    "        MaxPoolLayer(2, 2),                         # Pool1\n",
    "        ConvLayer(64, 3, 32, stride=1, padding=1),  # Conv2\n",
    "        MaxPoolLayer(2, 2),                         # Pool2\n",
    "    ]\n",
    "    classifier = [\n",
    "        DenseLayer(8*8*64, 128, activation='relu'),  # FC1\n",
    "        DenseLayer(128, 10, activation='softmax'),   # FC2\n",
    "    ]\n",
    "    return CNN(feature_extractor + classifier)\n",
    "\n",
    "# Instantiate and train the simple CNN model.\n",
    "# NOTE(review): train() only reads optimizer.lr, so method='adam' is inert\n",
    "# here -- layers perform their own updates inside backward().\n",
    "model = create_simple_cnn(x_train.shape[1:])\n",
    "optimizer = Optimizer(method='adam', lr=0.0001)\n",
    "train(model, x_train, y_train, epochs=10, optimizer=optimizer, reg_lambda=0.001)\n",
    "\n",
    "# 评估模型\n",
    "def evaluate(model, X_test, y_test):\n",
    "    correct = 0\n",
    "    model.set_training(False)\n",
    "    for i in range(len(X_test)):\n",
    "        x, y = X_test[i], y_test[i]\n",
    "        output = model.forward(x)\n",
    "        if np.argmax(output) == np.argmax(y):\n",
    "            correct += 1\n",
    "    accuracy = correct / len(X_test)\n",
    "    print(f'Test Accuracy: {accuracy * 100:.2f}%')\n",
    "\n",
    "# Run the final accuracy check on the test set.\n",
    "evaluate(model, x_test, y_test)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8339f1b1-6b72-42d0-92d7-41ec4541d22d",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
