{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Implementing CNN with numpy\n",
    "> convolution layer\n",
    "> \n",
    "> pooling layer\n",
    "> \n",
    "> dense layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pandas as pd"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "convolution layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Convolution():\n",
    "    \"\"\"卷积层\"\"\"\n",
    "#                  权重   偏差  步幅        填充\n",
    "    def __init__(self,weight,bias,stride = 1,padding = 0):\n",
    "\n",
    "        self.weight = weight\n",
    "        self.bias = bias\n",
    "        self.stride = stride\n",
    "        self.padding = padding\n",
    "\n",
    "        self.x = None\n",
    "\n",
    "    def _convolution_(self,img,weight,bias = False):\n",
    "\n",
    "        # 输入数据形状(样本数 通道 宽度 高度)\n",
    "        batch_size, in_channel, in_height, in_width = img.shape\n",
    "\n",
    "        # 卷积核形状(输入通道 输出通道 宽度 高度)\n",
    "        in_channel, f_out_channel, f_height, f_width = weight.shape\n",
    "\n",
    "        # 输出数据的高和宽\n",
    "        out_height = 1 + int((in_height + 2 * self.padding - f_height) / self.stride)\n",
    "        out_width = 1 + int((in_width + 2 * self.padding - f_width) / self.stride)\n",
    "\n",
    "        # 卷积运算的输出\n",
    "        out =  np.zeros((batch_size,f_out_channel,out_height,out_width))\n",
    "\n",
    "        # 输出的形状 batch_size, f_out_channel, out_height, out_width\n",
    "        for b in np.arange(batch_size):\n",
    "            for c in np.arange(f_out_channel):\n",
    "                for h in np.arange(out_height):\n",
    "                    for w in np.arange(out_width):\n",
    "                        if bias == True:\n",
    "                            out[b,c,h // self.stride,w // self.stride] = np.sum(img[b,:,h:h + f_height,w:w + f_width] *\n",
    "                                                                            weight[:,c]) + self.bias[c]\n",
    "                        elif bias == False:\n",
    "                            out[b, c, h // self.stride, w // self.stride] = np.sum(\n",
    "                                img[b, :, h:h + f_height, w:w + f_width] * weight[:, c])\n",
    "        return out\n",
    "\n",
    "    def con_forward(self, x):\n",
    "        \"\"\"前向传播 输出卷积结果\"\"\"\n",
    "\n",
    "        # 填充图像\n",
    "        img = np.pad(x, [(0, 0), (0, 0), (self.padding, self.padding), (self.padding, self.padding)], 'constant')\n",
    "\n",
    "        # 计算卷积 加上偏置\n",
    "        out = self._convolution_(img,self.weight,bias = True)\n",
    "\n",
    "        self.x = x\n",
    "\n",
    "        return out\n",
    "\n",
    "    def con_backward(self,dout):\n",
    "        \"\"\"反向传播\"\"\"\n",
    "        # 卷积核形状(输入通道 输出通道 宽度 高度)\n",
    "        in_channel, f_out_channel, f_height, f_width = self.weight.shape\n",
    "\n",
    "        # 卷积核翻转180°\n",
    "        weight_180 = np.flip(self.weight,(2,3))\n",
    "\n",
    "        # 交换通道\n",
    "        weight_180 = np.swapaxes(weight_180,0,1)\n",
    "\n",
    "        # 填充\n",
    "        pad_out = np.pad(dout, [(0, 0), (0, 0), (f_height-1,f_height-1), (f_width-1,f_width-1)], 'constant')\n",
    "\n",
    "        # 前一层的δ\n",
    "        dout_last = self._convolution_(pad_out,weight_180,bias=False)\n",
    "\n",
    "        # 交换 输入x的通道 变为 channel batch H W\n",
    "        x = np.swapaxes(self.x,0,1)\n",
    "\n",
    "        # 权重的梯度\n",
    "        dw = self._convolution_(x,dout)\n",
    "\n",
    "        # 偏置的梯度\n",
    "        db = np.sum(np.sum(np.sum(dout, axis=-1), axis=-1),axis=0)\n",
    "\n",
    "        batch = self.x.shape[0]\n",
    "\n",
    "\n",
    "        return dw/batch,db/batch,dout_last"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "pooling layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Pooling():\n",
    "    \"\"\"池化层\"\"\"\n",
    "    def __init__(self,pool,stride = 2,padding = 0):\n",
    "\n",
    "        # 池化窗口的高和宽\n",
    "        self.pool_h = pool[0]\n",
    "        self.pool_w = pool[1]\n",
    "        self.padding = padding\n",
    "        self.stride = stride\n",
    "\n",
    "        self.x = None\n",
    "        self.list = []\n",
    "\n",
    "    def pool_forward(self,x):\n",
    "        \"\"\"前向传播\"\"\"\n",
    "\n",
    "        # 输入数据的形状\n",
    "        batch_size,channel,height,width = x.shape\n",
    "\n",
    "        # 填充\n",
    "        img = np.pad(x, [(0, 0), (0, 0), (self.padding, self.padding), (self.padding, self.padding)], 'constant')\n",
    "        self.x = img\n",
    "\n",
    "        # 输出数据的高和宽\n",
    "        out_h = (height - self.pool_h) // self.stride + 1\n",
    "        out_w = (width - self.pool_w) // self.stride + 1\n",
    "\n",
    "        # 输出数据\n",
    "        pool_out = np.zeros((batch_size,channel,out_h,out_w))\n",
    "\n",
    "        for b in np.arange(batch_size):\n",
    "            for c in np.arange(channel):\n",
    "                for h in np.arange(out_h):\n",
    "                    for w in np.arange(out_w):\n",
    "                        pool_out[b,c,h,w] = np.max(img[b,c,\n",
    "                                                   self.stride * h : self.stride * h + self.pool_h,\n",
    "                                                   self.stride * w : self.stride * w + self.pool_w])\n",
    "\n",
    "        return pool_out\n",
    "\n",
    "    def pool_backward(self,dout):\n",
    "\n",
    "        # 前一层数据形状\n",
    "        batch_size, channel, height, width = self.x.shape\n",
    "\n",
    "        # 池化层输出数据形状\n",
    "        _,_,out_h,out_w = dout.shape\n",
    "\n",
    "        # 前一层的δ\n",
    "        dout_last = np.zeros((batch_size,channel,height,width))\n",
    "\n",
    "        for b in np.arange(batch_size):\n",
    "            for c in np.arange(channel):\n",
    "                for h in np.arange(out_h):\n",
    "                    for w in np.arange(out_w):\n",
    "                        max_index = np.argmax(self.x[b,c,\n",
    "                                              self.stride * h: self.stride * h + self.pool_h,\n",
    "                                              self.stride * w: self.stride * w + self.pool_w])\n",
    "\n",
    "                        h_index = self.stride * h + max_index // self.pool_w\n",
    "                        w_index = self.stride * w + max_index % self.pool_w\n",
    "\n",
    "                        dout_last[b,c,h_index,w_index] += dout[b,c,h,w]\n",
    "\n",
    "        return dout_last"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "dense layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FullConnect():\n",
    "    \"\"\"全连接层\"\"\"\n",
    "    def __init__(self,input_nodes,hidden_nodes,output_nodes,learning_rate):\n",
    "\n",
    "        #初始化输入数据：输入层神经元个数、隐藏层神经元个数、输出层神经元个数、学习率\n",
    "        self.input = input_nodes\n",
    "        self.hidden = hidden_nodes\n",
    "        self.output = output_nodes\n",
    "        self.lr = learning_rate\n",
    "\n",
    "        # 输入层和隐藏层之间的权重            #高斯分布的均值           #标准差                #大小\n",
    "        self.weight_i_h = np.random.normal(0.0,             pow(self.hidden,- 0.5),(self.input,self.hidden))\n",
    "        # 隐藏层和输出层之间的权重\n",
    "        self.weight_h_o = np.random.normal(0.0,             pow(self.output,- 0.5),(self.hidden,self.output))\n",
    "        # sigmoid激活函数\n",
    "        self.sigmoid = lambda x: 1.0/(1 + np.exp(-x*1.0))\n",
    "\n",
    "    def fc_forward(self, input_data):\n",
    "        self.input_data = input_data\n",
    "        #计算隐藏层输入\n",
    "        hidden_input = np.dot(self.input_data, self.weight_i_h)\n",
    "        #计算隐藏层输出\n",
    "        self.hidden_output = self.sigmoid(hidden_input)\n",
    "        #计算输出层输入\n",
    "        final_input = np.dot(self.hidden_output, self.weight_h_o)\n",
    "        #计算输出层输出\n",
    "        self.final_output = self.sigmoid(final_input)\n",
    "\n",
    "        return self.final_output\n",
    "\n",
    "    def fc_backward(self,target):\n",
    "\n",
    "        #计算在输出层的损失\n",
    "        delta_h_o = (target -self.final_output) * self.final_output * (1-self.final_output)\n",
    "        #计算在隐藏层的损失\n",
    "        delta_i_h = delta_h_o.dot(self.weight_h_o.T) * self.hidden_output * (1-self.hidden_output)\n",
    "        #计算输入层(展平层flatten)的损失\n",
    "        delta_flatten = delta_i_h.dot(self.weight_i_h.T)\n",
    "        #隐藏层_输出层权重更新\n",
    "        delta_weight_h_o = self.lr * self.hidden_output.T.dot(delta_h_o)\n",
    "        self.weight_h_o += delta_weight_h_o\n",
    "        #输入层_隐藏层权重更新\n",
    "        delta_weight_i_h = self.lr * self.input_data.T.dot(delta_i_h)\n",
    "        self.weight_i_h += delta_weight_i_h\n",
    "\n",
    "        return delta_flatten"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "activation layer"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Activation():\n",
    "    \"\"\"激活层\"\"\"\n",
    "\n",
    "    def relu_forward(self,input):\n",
    "\n",
    "        self.input = input\n",
    "        return np.maximum(0,input)\n",
    "\n",
    "    def relu_backward(self,next_dout):\n",
    "\n",
    "        dout = np.where(np.greater(self.input, 0), next_dout, 0)\n",
    "        return dout"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "mean_squared_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def mean_squared_loss(y_predict, y_true):\n",
    "    \"\"\"\n",
    "    均方误差损失函数\n",
    "    :param y_predict: 预测值,shape (N,d)，N为批量样本数\n",
    "    :param y_true: 真实值\n",
    "    :return:\n",
    "    \"\"\"\n",
    "    loss = np.mean(np.sum(np.square(y_predict - y_true), axis=-1))  # 损失函数值\n",
    "    dy = y_predict - y_true  # 损失函数关于网络输出的梯度\n",
    "    return loss, dy"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "util"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def im2col(input_data,f_height,f_width,stride = 1,padding = 0):\n",
    "    \"\"\"优化卷积算法 将原来矩阵展开\"\"\"\n",
    "    out_channel,in_channel,height,width = input_data.shape\n",
    "\n",
    "    out_height = 1 + (height + 2 * padding - f_height) // stride\n",
    "    out_width = 1 + (width + 2 * padding - f_width) // stride\n",
    "\n",
    "    img = np.pad(input_data,[(0,0),(0,0),(padding,padding),(padding,padding)],'constant')\n",
    "    col = np.zeros((out_channel,in_channel,f_height,f_width,out_height,out_width))\n",
    "\n",
    "    for y in range(f_height):\n",
    "        y_max = y + stride * out_height\n",
    "        for x in range(f_width):\n",
    "            x_max = x + stride * out_width\n",
    "            col[:, :, y, x, :, :] = img[:, :, y:y_max:stride, x:x_max:stride]\n",
    "\n",
    "    # 坐标轴的变换\n",
    "    col = col.transpose(0, 4, 5, 1, 2, 3).reshape(out_channel * out_height * out_width, -1)\n",
    "\n",
    "    return col\n",
    "\n",
    "def col2im(col,input_shape,f_height,f_width,stride = 1,padding = 0):\n",
    "    \"\"\"将展开的矩阵还原\"\"\"\n",
    "\n",
    "    N,C,height,width = input_shape\n",
    "\n",
    "    out_height = (height + 2 * padding - f_height) // stride + 1\n",
    "    out_width = (width + 2 * padding - f_width) // stride + 1\n",
    "    col = col.reshape(N, out_height, out_width, C, f_height, f_width).transpose(0, 3, 4, 5, 1, 2)\n",
    "\n",
    "    img = np.zeros((N, C, height + 2*padding + stride - 1, width + 2*padding + stride - 1))\n",
    "    for y in range(f_height):\n",
    "        y_max = y + stride*out_height\n",
    "        for x in range(f_width):\n",
    "            x_max = x + stride*out_width\n",
    "            img[:, :, y:y_max:stride, x:x_max:stride] += col[:, :, y, x, :, :]\n",
    "\n",
    "    return img[:, :, padding:height + padding, padding:width + padding]\n",
    "\n",
    "def translate(y):\n",
    "    train_y = np.zeros([y.shape[0], 10])\n",
    "    for i in range(y.shape[0]):\n",
    "        for j in range(10):\n",
    "            if y[i] == j:\n",
    "                train_y[i][j] = 1\n",
    "    return train_y"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# ==================== Parameter initialization ====================\n",
    "# one sample per step, single-channel 28x28 images (MNIST)\n",
    "batch = 1\n",
    "in_channel = 1\n",
    "input_size = (28,28)\n",
    "\n",
    "# convolution: 4 filters of 3x3, stride 1, no padding\n",
    "out_channel = 4\n",
    "filter_size = (3,3)\n",
    "stride = 1\n",
    "padding = 0\n",
    "\n",
    "# 2x2 max pooling\n",
    "pool_size = (2,2)\n",
    "\n",
    "# learning rates for the dense and convolution layers\n",
    "fc_lr = 0.01\n",
    "conv_lr = 0.001\n",
    "\n",
    "# convolution kernel (in_channel, out_channel, f_h, f_w) and per-channel bias\n",
    "conv_weight = np.random.randn(in_channel,out_channel,filter_size[0],filter_size[1])\n",
    "conv_bias = np.zeros(out_channel)\n",
    "\n",
    "# spatial size after conv then pooling; flatten_shape feeds the dense layer\n",
    "flatten_h = (1 + (input_size[0] + 2*padding - filter_size[0]) // stride) // pool_size[0]\n",
    "flatten_w = (1 + (input_size[1] + 2*padding - filter_size[1]) // stride) // pool_size[1]\n",
    "flatten_shape = (batch,out_channel,flatten_h,flatten_w)\n",
    "\n",
    "# number of inputs to the fully connected network\n",
    "fc_input = flatten_shape[1] * flatten_shape[2] * flatten_shape[3]\n",
    "\n",
    "fc_hidden = 800\n",
    "fc_output = 10\n",
    "# ===================================================\n",
    "\n",
    "# layer instances shared by cnn_forward / cnn_backward below\n",
    "conv = Convolution(conv_weight,conv_bias,stride,padding)\n",
    "act = Activation()\n",
    "pool = Pooling(pool_size)\n",
    "fc = FullConnect(fc_input,fc_hidden,fc_output,fc_lr)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def cnn_forward(input):\n",
    "    \"\"\"Forward pass: conv -> ReLU -> max-pool -> flatten -> dense.\n",
    "\n",
    "    input: (batch, channel, height, width); returns the dense-layer output.\n",
    "    \"\"\"\n",
    "    conv_out = conv.con_forward(input)\n",
    "\n",
    "    relu_out = act.relu_forward(conv_out)\n",
    "\n",
    "    # (removed a redundant self-assignment of pool_out here)\n",
    "    pool_out = pool.pool_forward(relu_out)\n",
    "\n",
    "    # flatten (batch, C, H, W) to (batch, C*H*W) for the dense layer\n",
    "    flatten_out = pool_out.reshape(pool_out.shape[0], -1)\n",
    "\n",
    "    fc_out = fc.fc_forward(flatten_out)\n",
    "\n",
    "    return fc_out\n",
    "\n",
    "def cnn_backward(target):\n",
    "    \"\"\"Backward pass; returns (dw, db, delta w.r.t. the network input).\"\"\"\n",
    "\n",
    "    d_flatten = fc.fc_backward(target)\n",
    "\n",
    "    # un-flatten the dense-layer delta back to the pooled feature-map shape\n",
    "    d_pool = d_flatten.reshape(flatten_shape)\n",
    "\n",
    "    d_relu = pool.pool_backward(d_pool)\n",
    "\n",
    "    d_conv = act.relu_backward(d_relu)\n",
    "\n",
    "    dw, db, d_input = conv.con_backward(d_conv)\n",
    "\n",
    "    return dw, db, d_input"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "train_x: \n",
      "[[0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]\n",
      " ...\n",
      " [0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]]\n",
      "y_train: \n",
      "[5 0 4 ... 5 6 8]\n",
      "test_x: \n",
      "[[0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]\n",
      " ...\n",
      " [0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]\n",
      " [0 0 0 ... 0 0 0]]\n",
      "y_test: \n",
      "[7 2 1 ... 4 5 6]\n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "Cell \u001b[1;32mIn[16], line 29\u001b[0m\n\u001b[0;32m     26\u001b[0m \u001b[38;5;28minput\u001b[39m \u001b[38;5;241m=\u001b[39m (train_x[j]\u001b[38;5;241m/\u001b[39m\u001b[38;5;241m255\u001b[39m)\u001b[38;5;241m.\u001b[39mreshape(\u001b[38;5;241m1\u001b[39m,\u001b[38;5;241m-\u001b[39m\u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m     27\u001b[0m img \u001b[38;5;241m=\u001b[39m \u001b[38;5;28minput\u001b[39m\u001b[38;5;241m.\u001b[39mreshape(batch,in_channel,input_size[\u001b[38;5;241m0\u001b[39m],input_size[\u001b[38;5;241m1\u001b[39m])\n\u001b[1;32m---> 29\u001b[0m out \u001b[38;5;241m=\u001b[39m \u001b[43mcnn_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mimg\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     30\u001b[0m y \u001b[38;5;241m=\u001b[39m np\u001b[38;5;241m.\u001b[39margmax(out,axis\u001b[38;5;241m=\u001b[39m\u001b[38;5;241m1\u001b[39m)\n\u001b[0;32m     32\u001b[0m \u001b[38;5;28;01mif\u001b[39;00m y \u001b[38;5;241m==\u001b[39m y_train[j]:\n",
      "Cell \u001b[1;32mIn[10], line 14\u001b[0m, in \u001b[0;36mcnn_forward\u001b[1;34m(input)\u001b[0m\n\u001b[0;32m      9\u001b[0m pool_out \u001b[38;5;241m=\u001b[39m pool_out\n\u001b[0;32m     11\u001b[0m flatten_out \u001b[38;5;241m=\u001b[39m pool_out\u001b[38;5;241m.\u001b[39mreshape(pool_out\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m0\u001b[39m],pool_out\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m1\u001b[39m]\u001b[38;5;241m*\u001b[39m\n\u001b[0;32m     12\u001b[0m                                pool_out\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m2\u001b[39m] \u001b[38;5;241m*\u001b[39m pool_out\u001b[38;5;241m.\u001b[39mshape[\u001b[38;5;241m3\u001b[39m])\n\u001b[1;32m---> 14\u001b[0m fc_out \u001b[38;5;241m=\u001b[39m \u001b[43mfc\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfc_forward\u001b[49m\u001b[43m(\u001b[49m\u001b[43mflatten_out\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     16\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m fc_out\n",
      "Cell \u001b[1;32mIn[5], line 21\u001b[0m, in \u001b[0;36mFullConnect.fc_forward\u001b[1;34m(self, input_data)\u001b[0m\n\u001b[0;32m     19\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39minput_data \u001b[38;5;241m=\u001b[39m input_data\n\u001b[0;32m     20\u001b[0m \u001b[38;5;66;03m#计算隐藏层输入\u001b[39;00m\n\u001b[1;32m---> 21\u001b[0m hidden_input \u001b[38;5;241m=\u001b[39m \u001b[43mnp\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mdot\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43minput_data\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mweight_i_h\u001b[49m\u001b[43m)\u001b[49m\n\u001b[0;32m     22\u001b[0m \u001b[38;5;66;03m#计算隐藏层输出\u001b[39;00m\n\u001b[0;32m     23\u001b[0m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39mhidden_output \u001b[38;5;241m=\u001b[39m \u001b[38;5;28mself\u001b[39m\u001b[38;5;241m.\u001b[39msigmoid(hidden_input)\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# Load the training data (column 0 = label, columns 1..784 = pixels)\n",
    "mnist_train = pd.read_csv(\"./mnist_train.csv\")\n",
    "train_x = mnist_train.values[ : ,1:785]\n",
    "print(\"train_x: \")\n",
    "print(train_x)\n",
    "y_train = mnist_train.values[ : ,0]\n",
    "print(\"y_train: \")\n",
    "print(y_train)\n",
    "train_y = translate(y_train)\n",
    "\n",
    "# Load the test data\n",
    "mnist_test = pd.read_csv(\"./mnist_test.csv\")\n",
    "test_x = mnist_test.values[ : ,1:785]\n",
    "print(\"test_x: \")\n",
    "print(test_x)\n",
    "y_test = mnist_test.values[ : ,0]\n",
    "print(\"y_test: \")\n",
    "print(y_test)\n",
    "test_y = translate(y_test)\n",
    "\n",
    "# Training loop\n",
    "for i in range(20):\n",
    "    correct_rate = 0.0\n",
    "    for j in range(train_x.shape[0]):\n",
    "\n",
    "        # scale pixels to [0, 1] and reshape to (batch, channel, H, W)\n",
    "        input = (train_x[j]/255).reshape(1,-1)\n",
    "        img = input.reshape(batch,in_channel,input_size[0],input_size[1])\n",
    "\n",
    "        out = cnn_forward(img)\n",
    "        y = np.argmax(out,axis=1)\n",
    "\n",
    "        if y == y_train[j]:\n",
    "            correct_rate += 1\n",
    "        dw,db,_ = cnn_backward(train_y[j])\n",
    "        # in-place updates, so conv.weight / conv.bias see the new values\n",
    "        conv_weight -= conv_lr * dw\n",
    "        conv_bias -= conv_lr * db\n",
    "\n",
    "    # BUG FIX: divide by the number of samples seen, not a hard-coded 100\n",
    "    print('step',i,'训练精度:', correct_rate / train_x.shape[0])\n",
    "\n",
    "# Evaluate on the first 100 test samples\n",
    "test_acc = 0.0\n",
    "for i in range(100):\n",
    "    test_img = (test_x[i]/255).reshape(batch,in_channel,input_size[0],input_size[1])\n",
    "    test_out = cnn_forward(test_img)\n",
    "    y_predict = np.argmax(test_out,axis=1)\n",
    "    if y_predict == y_test[i]:\n",
    "        test_acc += 1\n",
    "    print('预测值',y_predict,'真实值',y_test[i])\n",
    "\n",
    "# 100 test samples, so /100 is the correct denominator here\n",
    "print('测试精度:',test_acc/100)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "base",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
