{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import pdb\n",
    "\n",
    "from activations import Relu\n",
    "from convolution import Conv2d, Maxpool, BatchNorm2d, Flatten\n",
    "from lossfunctions import CrossEntropyLoss\n",
    "from linear import Linear, Dropout\n",
    "from datasets import Mnist\n",
    "from optim import SGD\n",
    "from utils import label_encoder, shuffle_data"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
     "class MyNet():\n",
     "    \"\"\"A small CNN classifier for MNIST.\n",
     "\n",
     "    Three conv blocks (Conv2d -> Relu -> BatchNorm2d), the first two followed\n",
     "    by 2x2 max-pooling, then Flatten -> Linear -> Dropout -> Linear (10 classes).\n",
     "    \"\"\"\n",
     "    def __init__(self):\n",
     "        self.conv1 = Conv2d(in_channels=1, n_filter=30, filter_size=(3, 3), padding=1, stride=1)\n",
     "        self.relu1 = Relu()\n",
     "        self.bn1 = BatchNorm2d(n_channel=30, momentum=0.1)\n",
     "        self.maxpool1 = Maxpool(size=2, stride=2)  # for mnist, the output feature size here is 14x14\n",
     "        \n",
     "        self.conv2 = Conv2d(in_channels=30, n_filter=20, filter_size=(3, 3), padding=1, stride=1)\n",
     "        self.relu2 = Relu()\n",
     "        self.bn2 = BatchNorm2d(n_channel=20, momentum=0.1)\n",
     "        self.maxpool2 = Maxpool(size=2, stride=2)  # identical to maxpool1; maxpool1 could be reused instead\n",
     "        # for mnist, the output feature size here is 7x7\n",
     "        \n",
     "        self.conv3 = Conv2d(in_channels=20, n_filter=10, filter_size=(3, 3), padding=0, stride=1)\n",
     "        # for mnist, the output feature size here is 5x5\n",
     "        self.relu3 = Relu()\n",
     "        self.bn3 = BatchNorm2d(n_channel=10, momentum=0.1)\n",
     "        \n",
     "        self.flatten = Flatten()\n",
     "        \n",
     "        # fully connected head (dim_in=250 comes from the 10 * 5 * 5 flattened features)\n",
     "        self.fc1 = Linear(dim_in=250, dim_out=100)\n",
     "        self.dropout1 = Dropout(p=0.5)\n",
     "        self.fc2 = Linear(dim_in=100, dim_out=10)\n",
     "        \n",
     "        # all learnable parameters in forward order; backward() builds its grads\n",
     "        # list in exactly the same order\n",
     "        self.parameters = self.conv1.params + self.bn1.params + self.conv2.params + self.bn2.params + \\\n",
     "                            self.conv3.params + self.bn3.params + self.fc1.params + self.fc2.params\n",
     "    \n",
     "    def __call__(self, X, mode='train'):\n",
     "        \"\"\"\n",
     "        mode: 'train' or 'test' -- whether we are in the training or the testing phase\n",
     "        \"\"\"\n",
     "        return self.forward(X, mode)\n",
     "    \n",
     "    def forward(self, X, mode):\n",
     "        \"\"\"Forward pass; X is a batch of images, returns fc2's class scores.\"\"\"\n",
     "        conv1_out = self.conv1(X)\n",
     "        relu1_out = self.relu1(conv1_out)\n",
     "        bn1_out = self.bn1(relu1_out, mode)\n",
     "        pool1_out = self.maxpool1(bn1_out)\n",
     "        \n",
     "        conv2_out = self.conv2(pool1_out)\n",
     "        relu2_out = self.relu2(conv2_out)\n",
     "        bn2_out = self.bn2(relu2_out, mode)\n",
     "        pool2_out = self.maxpool2(bn2_out)\n",
     "        \n",
     "        conv3_out = self.conv3(pool2_out)\n",
     "        relu3_out = self.relu3(conv3_out)\n",
     "        bn3_out = self.bn3(relu3_out, mode)\n",
     "        \n",
     "        flat_out = self.flatten(bn3_out)\n",
     "        \n",
     "        fc1_out = self.fc1(flat_out)\n",
     "        drop1_out = self.dropout1(fc1_out, mode)\n",
     "        fc2_out = self.fc2(drop1_out)\n",
     "        \n",
     "        return fc2_out\n",
     "    \n",
     "    def backward(self, d_out):\n",
     "        \"\"\"Backpropagate d_out (the gradient of the loss w.r.t. the network\n",
     "        output) through every layer in reverse order; returns the parameter\n",
     "        gradients in the same order as self.parameters.\n",
     "        \"\"\"\n",
     "        d_fc2_x, d_fc2_params = self.fc2.backward(d_out)\n",
     "        d_drop1_x = self.dropout1.backward(d_fc2_x)\n",
     "        d_fc1_x, d_fc1_params = self.fc1.backward(d_drop1_x)\n",
     "        \n",
     "        d_flat_x = self.flatten.backward(d_fc1_x)\n",
     "        \n",
     "        d_bn3_x, d_bn3_params = self.bn3.backward(d_flat_x)\n",
     "        d_relu3_x = self.relu3.backward(d_bn3_x)\n",
     "        d_conv3_x, d_conv3_params = self.conv3.backward(d_relu3_x)\n",
     "        \n",
     "        d_pool2_x = self.maxpool2.backward(d_conv3_x)\n",
     "        d_bn2_x, d_bn2_params = self.bn2.backward(d_pool2_x)\n",
     "        d_relu2_x = self.relu2.backward(d_bn2_x)\n",
     "        d_conv2_x, d_conv2_params = self.conv2.backward(d_relu2_x)\n",
     "        \n",
     "        d_pool1_x = self.maxpool1.backward(d_conv2_x)\n",
     "        d_bn1_x, d_bn1_params = self.bn1.backward(d_pool1_x)\n",
     "        d_relu1_x = self.relu1.backward(d_bn1_x)\n",
     "        _, d_conv1_params = self.conv1.backward(d_relu1_x)  # input gradient unused at the first layer\n",
     "        \n",
     "        grads = d_conv1_params + d_bn1_params + d_conv2_params + d_bn2_params + d_conv3_params + \\\n",
     "                d_bn3_params + d_fc1_params + d_fc2_params\n",
     "        \n",
     "        return grads"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 定义训练方式"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(train_datas, train_labels, test_datas, test_labels, network, \n",
    "          loss_fc, optim, epochs, batch_size):\n",
    "    for epoch in range(1, epochs + 1):\n",
    "        print(\"**\" * 20, \"epoch: %d\" % epoch, \"**\" * 20)\n",
    "        \n",
    "        # 打乱数据\n",
    "        shuffled_datas, shuffled_labels = shuffle_data(train_datas, train_labels)\n",
    "        \n",
    "        n_correct = 0  # 预测正确的个数\n",
    "        n_data = train_datas.shape[0]  # 总样本数\n",
    "        loss_sum = 0.0\n",
    "        \n",
    "        num_trained_batchs = 0  # 记录当前epoch训练的batch数目\n",
    "        for i in range(0, n_data, batch_size):\n",
    "            batch_datas, batch_labels = shuffled_datas[i : i+batch_size], shuffled_labels[i : i+batch_size]\n",
    "            \n",
    "            net_out = network(batch_datas, mode='train')  # 模型输出\n",
    "            \n",
    "            batch_loss = loss_fc(net_out, batch_labels)  # 计算损失\n",
    "            loss_sum += batch_loss\n",
    "            \n",
    "            grad_out = loss_fc.backward()  # 计算loss对模型输出的梯度\n",
    "            # 计算模型所有参数的梯度，更新参数\n",
    "            grads = network.backward(grad_out)\n",
    "            optim.update_parameters(grads)\n",
    "            \n",
    "            # pdb.set_trace()\n",
    "            \n",
    "            # 计算预测正确的个数\n",
    "            predict = np.argmax(net_out, axis=1)\n",
    "            n_correct += np.sum(predict == np.argmax(batch_labels, axis=1))\n",
    "            \n",
    "            num_trained_batchs += 1\n",
    "            if num_trained_batchs % 100 == 0:\n",
    "                ave_loss = loss_sum / num_trained_batchs\n",
    "                print(\"*\" * 20, \"%d th batch, loss: %f\" % (num_trained_batchs, ave_loss), \"*\" * 20)\n",
    "        \n",
    "        train_acc = n_correct / n_data\n",
    "        \n",
    "        test_acc = test(test_datas, test_labels, network, batch_size)\n",
    "        \n",
    "        print(\"epoch %d, train accuracy: %f  test accuracy: %f\" % (epoch, train_acc, test_acc))\n",
    "            \n",
    "\n",
    "def test(test_datas, test_labels, network, batch_size):\n",
    "    \"\"\"\n",
    "    测试时使用批量化喂数据，否则内存不够\n",
    "    test_labels: one hot\n",
    "    return: 测试准确率\n",
    "    \"\"\"\n",
    "    n_data = test_labels.shape[0]\n",
    "    n_correct = 0\n",
    "    for i in range(0, n_data, batch_size):\n",
    "        batch_datas = test_datas[i : i + batch_size]\n",
    "        batch_labels = test_labels[i : i + batch_size]\n",
    "        net_out = network(batch_datas, mode='test')\n",
    "        predict = np.argmax(net_out, axis=1)\n",
    "        n_correct += np.sum(predict == np.argmax(batch_labels, axis=1))\n",
    "        \n",
    "    test_acc = n_correct / n_data\n",
    "    return test_acc"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 加载mnist数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "魔数:2051, 图片数量: 60000张, 图片大小: 28*28\n",
      "16\n",
      ">784B 16 784\n",
      "已解析 10000张\n",
      "7839232\n",
      "已解析 20000张\n",
      "15679232\n",
      "已解析 30000张\n",
      "23519232\n",
      "已解析 40000张\n",
      "31359232\n",
      "已解析 50000张\n",
      "39199232\n",
      "已解析 60000张\n",
      "47039232\n",
      "魔数:2049, 图片数量: 60000张\n",
      "已解析 10000张\n",
      "已解析 20000张\n",
      "已解析 30000张\n",
      "已解析 40000张\n",
      "已解析 50000张\n",
      "已解析 60000张\n",
      "魔数:2051, 图片数量: 10000张, 图片大小: 28*28\n",
      "16\n",
      ">784B 16 784\n",
      "已解析 10000张\n",
      "7839232\n",
      "魔数:2049, 图片数量: 10000张\n",
      "已解析 10000张\n"
     ]
    }
   ],
   "source": [
     "# NOTE(review): absolute local Windows paths -- consider a configurable data directory\n",
     "train_image_path =  r'D:\\datas\\mnist\\train-images.idx3-ubyte'\n",
     "train_label_path =  r'D:\\datas\\mnist\\train-labels.idx1-ubyte'\n",
     "test_image_path =  r'D:\\datas\\mnist\\t10k-images.idx3-ubyte'\n",
     "test_label_path =  r'D:\\datas\\mnist\\t10k-labels.idx1-ubyte'\n",
     "# training set: 60000 samples\n",
     "# test set: 10000 samples\n",
     "\n",
     "# create the Mnist dataset instance\n",
     "mnist = Mnist(train_image_path, train_label_path, test_image_path, test_label_path)\n",
     "\n",
     "# load the training data\n",
     "train_images = mnist.load_train_images()\n",
     "train_labels = mnist.load_train_labels()\n",
     "train_images = train_images[:, np.newaxis, :, :]  # add a channel axis: the conv net expects 4-D input [N, C, H, W], while mnist loads 3-D [N, H, W]\n",
     "train_labels = np.array(train_labels, np.int32)  # labels load as float; convert to int, otherwise the one-hot conversion would go wrong\n",
     "train_labels = label_encoder(train_labels, 10)\n",
     "\n",
     "# load the test data (same preprocessing as the training data)\n",
     "test_images = mnist.load_test_images()\n",
     "test_labels = mnist.load_test_labels()\n",
     "test_images = test_images[:, np.newaxis, :, :]\n",
     "test_labels = np.array(test_labels, np.int32)\n",
     "test_labels = label_encoder(test_labels, 10)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 开始训练"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 使用SGD训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "**************************************** epoch: 1 ****************************************\n",
      "******************** 100 th batch, loss: 2.294114 ********************\n",
      "******************** 200 th batch, loss: 1.674324 ********************\n",
      "******************** 300 th batch, loss: 1.360098 ********************\n",
      "******************** 400 th batch, loss: 1.172868 ********************\n",
      "******************** 500 th batch, loss: 1.037376 ********************\n",
      "******************** 600 th batch, loss: 0.939488 ********************\n",
      "******************** 700 th batch, loss: 0.866106 ********************\n",
      "******************** 800 th batch, loss: 0.806912 ********************\n",
      "******************** 900 th batch, loss: 0.756997 ********************\n",
      "******************** 1000 th batch, loss: 0.714366 ********************\n",
      "******************** 1100 th batch, loss: 0.675957 ********************\n",
      "******************** 1200 th batch, loss: 0.647047 ********************\n",
      "******************** 1300 th batch, loss: 0.616233 ********************\n",
      "******************** 1400 th batch, loss: 0.594029 ********************\n",
      "******************** 1500 th batch, loss: 0.571482 ********************\n",
      "******************** 1600 th batch, loss: 0.552422 ********************\n",
      "******************** 1700 th batch, loss: 0.535268 ********************\n",
      "******************** 1800 th batch, loss: 0.518393 ********************\n",
      "******************** 1900 th batch, loss: 0.503792 ********************\n",
      "******************** 2000 th batch, loss: 0.490532 ********************\n",
      "epoch 1, train accuracy: 0.849500  test accuracy: 0.955400\n",
      "**************************************** epoch: 2 ****************************************\n",
      "******************** 100 th batch, loss: 0.232883 ********************\n",
      "******************** 200 th batch, loss: 0.230598 ********************\n",
      "******************** 300 th batch, loss: 0.219935 ********************\n",
      "******************** 400 th batch, loss: 0.216815 ********************\n",
      "******************** 500 th batch, loss: 0.218382 ********************\n",
      "******************** 600 th batch, loss: 0.213029 ********************\n",
      "******************** 700 th batch, loss: 0.209409 ********************\n",
      "******************** 800 th batch, loss: 0.207112 ********************\n",
      "******************** 900 th batch, loss: 0.204232 ********************\n",
      "******************** 1000 th batch, loss: 0.205093 ********************\n",
      "******************** 1100 th batch, loss: 0.205172 ********************\n",
      "******************** 1200 th batch, loss: 0.202066 ********************\n",
      "******************** 1300 th batch, loss: 0.198654 ********************\n",
      "******************** 1400 th batch, loss: 0.198053 ********************\n",
      "******************** 1500 th batch, loss: 0.195828 ********************\n",
      "******************** 1600 th batch, loss: 0.194917 ********************\n",
      "******************** 1700 th batch, loss: 0.193736 ********************\n",
      "******************** 1800 th batch, loss: 0.192274 ********************\n",
      "******************** 1900 th batch, loss: 0.191177 ********************\n",
      "******************** 2000 th batch, loss: 0.189747 ********************\n",
      "epoch 2, train accuracy: 0.940500  test accuracy: 0.967900\n",
      "**************************************** epoch: 3 ****************************************\n",
      "******************** 100 th batch, loss: 0.157776 ********************\n",
      "******************** 200 th batch, loss: 0.160244 ********************\n",
      "******************** 300 th batch, loss: 0.158695 ********************\n",
      "******************** 400 th batch, loss: 0.161886 ********************\n",
      "******************** 500 th batch, loss: 0.158724 ********************\n",
      "******************** 600 th batch, loss: 0.156167 ********************\n",
      "******************** 700 th batch, loss: 0.157858 ********************\n",
      "******************** 800 th batch, loss: 0.159318 ********************\n",
      "******************** 900 th batch, loss: 0.157177 ********************\n",
      "******************** 1000 th batch, loss: 0.158159 ********************\n",
      "******************** 1100 th batch, loss: 0.158121 ********************\n",
      "******************** 1200 th batch, loss: 0.157814 ********************\n",
      "******************** 1300 th batch, loss: 0.156787 ********************\n",
      "******************** 1400 th batch, loss: 0.154653 ********************\n",
      "******************** 1500 th batch, loss: 0.155393 ********************\n",
      "******************** 1600 th batch, loss: 0.156707 ********************\n",
      "******************** 1700 th batch, loss: 0.155441 ********************\n",
      "******************** 1800 th batch, loss: 0.154288 ********************\n",
      "******************** 1900 th batch, loss: 0.153360 ********************\n",
      "******************** 2000 th batch, loss: 0.152392 ********************\n",
      "epoch 3, train accuracy: 0.952967  test accuracy: 0.973100\n"
     ]
    }
   ],
   "source": [
     "# initialize the network\n",
     "mynet = MyNet()\n",
     "\n",
     "# the loss function\n",
     "cross_entropy = CrossEntropyLoss()\n",
     "\n",
     "# plain SGD (no momentum)\n",
     "sgd = SGD(mynet.parameters, lr=0.01)\n",
     "\n",
     "train(train_images, train_labels, test_images, test_labels, network=mynet,\n",
     "      loss_fc=cross_entropy, optim=sgd, epochs=3, batch_size=30)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 使用Momentum SGD训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "**************************************** epoch: 1 ****************************************\n",
      "******************** 100 th batch, loss: 1.201727 ********************\n",
      "******************** 200 th batch, loss: 0.800054 ********************\n",
      "******************** 300 th batch, loss: 0.627133 ********************\n",
      "******************** 400 th batch, loss: 0.533434 ********************\n",
      "******************** 500 th batch, loss: 0.474872 ********************\n",
      "******************** 600 th batch, loss: 0.432316 ********************\n",
      "******************** 700 th batch, loss: 0.400252 ********************\n",
      "******************** 800 th batch, loss: 0.374126 ********************\n",
      "******************** 900 th batch, loss: 0.352210 ********************\n",
      "******************** 1000 th batch, loss: 0.333141 ********************\n",
      "******************** 1100 th batch, loss: 0.319562 ********************\n",
      "******************** 1200 th batch, loss: 0.307324 ********************\n",
      "******************** 1300 th batch, loss: 0.295901 ********************\n",
      "******************** 1400 th batch, loss: 0.285395 ********************\n",
      "******************** 1500 th batch, loss: 0.275471 ********************\n",
      "******************** 1600 th batch, loss: 0.266668 ********************\n",
      "******************** 1700 th batch, loss: 0.259145 ********************\n",
      "******************** 1800 th batch, loss: 0.251230 ********************\n",
      "******************** 1900 th batch, loss: 0.245891 ********************\n",
      "******************** 2000 th batch, loss: 0.240104 ********************\n",
      "epoch 1, train accuracy: 0.927817  test accuracy: 0.975000\n",
      "**************************************** epoch: 2 ****************************************\n",
      "******************** 100 th batch, loss: 0.115397 ********************\n",
      "******************** 200 th batch, loss: 0.117541 ********************\n",
      "******************** 300 th batch, loss: 0.110366 ********************\n",
      "******************** 400 th batch, loss: 0.117021 ********************\n",
      "******************** 500 th batch, loss: 0.116808 ********************\n",
      "******************** 600 th batch, loss: 0.117222 ********************\n",
      "******************** 700 th batch, loss: 0.116315 ********************\n",
      "******************** 800 th batch, loss: 0.116487 ********************\n",
      "******************** 900 th batch, loss: 0.116482 ********************\n",
      "******************** 1000 th batch, loss: 0.117153 ********************\n",
      "******************** 1100 th batch, loss: 0.116400 ********************\n",
      "******************** 1200 th batch, loss: 0.116582 ********************\n",
      "******************** 1300 th batch, loss: 0.116381 ********************\n",
      "******************** 1400 th batch, loss: 0.115446 ********************\n",
      "******************** 1500 th batch, loss: 0.114026 ********************\n",
      "******************** 1600 th batch, loss: 0.113181 ********************\n",
      "******************** 1700 th batch, loss: 0.112160 ********************\n",
      "******************** 1800 th batch, loss: 0.112151 ********************\n",
      "******************** 1900 th batch, loss: 0.112404 ********************\n",
      "******************** 2000 th batch, loss: 0.112656 ********************\n",
      "epoch 2, train accuracy: 0.966067  test accuracy: 0.978900\n",
      "**************************************** epoch: 3 ****************************************\n",
      "******************** 100 th batch, loss: 0.097347 ********************\n",
      "******************** 200 th batch, loss: 0.092374 ********************\n",
      "******************** 300 th batch, loss: 0.095974 ********************\n",
      "******************** 400 th batch, loss: 0.093282 ********************\n",
      "******************** 500 th batch, loss: 0.094074 ********************\n",
      "******************** 600 th batch, loss: 0.096557 ********************\n",
      "******************** 700 th batch, loss: 0.096027 ********************\n",
      "******************** 800 th batch, loss: 0.096519 ********************\n",
      "******************** 900 th batch, loss: 0.097390 ********************\n",
      "******************** 1000 th batch, loss: 0.097327 ********************\n",
      "******************** 1100 th batch, loss: 0.094425 ********************\n",
      "******************** 1200 th batch, loss: 0.094232 ********************\n",
      "******************** 1300 th batch, loss: 0.096179 ********************\n",
      "******************** 1400 th batch, loss: 0.095138 ********************\n",
      "******************** 1500 th batch, loss: 0.095496 ********************\n",
      "******************** 1600 th batch, loss: 0.095212 ********************\n",
      "******************** 1700 th batch, loss: 0.095224 ********************\n",
      "******************** 1800 th batch, loss: 0.094293 ********************\n",
      "******************** 1900 th batch, loss: 0.093824 ********************\n",
      "******************** 2000 th batch, loss: 0.093302 ********************\n",
      "epoch 3, train accuracy: 0.971933  test accuracy: 0.980100\n"
     ]
    }
   ],
   "source": [
     "# re-initialize the network from scratch\n",
     "mynet = MyNet()\n",
     "\n",
     "# the loss function\n",
     "cross_entropy = CrossEntropyLoss()\n",
     "\n",
     "# SGD with momentum 0.9\n",
     "sgd = SGD(mynet.parameters, lr=0.01, momentum=0.9)\n",
     "\n",
     "train(train_images, train_labels, test_images, test_labels, network=mynet,\n",
     "      loss_fc=cross_entropy, optim=sgd, epochs=3, batch_size=30)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Pytorch",
   "language": "python",
   "name": "pytorch"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
