{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Import required packages\n",
    "import paddle\n",
    "import paddle.fluid as fluid\n",
    "import numpy as np\n",
    "from paddle.fluid.dygraph.nn import Conv2D, Pool2D, Linear\n",
    "\n",
    "# Define the LeNet network structure\n",
    "class LeNet(fluid.dygraph.Layer):\n",
    "    def __init__(self, num_classes=1):\n",
    "        super(LeNet, self).__init__()\n",
    "\n",
    "        # Create conv/pool blocks: each conv layer uses a Sigmoid activation\n",
    "        # and is followed by a 2x2 max pooling\n",
    "        self.conv1 = Conv2D(num_channels=1, num_filters=6, filter_size=5, act='sigmoid')\n",
    "        self.pool1 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')\n",
    "        self.conv2 = Conv2D(num_channels=6, num_filters=16, filter_size=5, act='sigmoid')\n",
    "        self.pool2 = Pool2D(pool_size=2, pool_stride=2, pool_type='max')\n",
    "        # Create the third convolution layer\n",
    "        self.conv3 = Conv2D(num_channels=16, num_filters=120, filter_size=4, act='sigmoid')\n",
    "        # Create the fully connected layers: the first FC layer has 64 output\n",
    "        # neurons, the second outputs `num_classes` neurons (one per class)\n",
    "        self.fc1 = Linear(input_dim=120, output_dim=64, act='sigmoid')\n",
    "        self.fc2 = Linear(input_dim=64, output_dim=num_classes)\n",
    "    # Forward pass of the network\n",
    "    def forward(self, x):\n",
    "        x = self.conv1(x)\n",
    "        x = self.pool1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = self.pool2(x)\n",
    "        x = self.conv3(x)\n",
    "        # Flatten the conv3 feature maps to [N, -1] before the FC layers\n",
    "        x = fluid.layers.reshape(x, [x.shape[0], -1])\n",
    "        x = self.fc1(x)\n",
    "        x = self.fc2(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[<paddle.fluid.dygraph.nn.Conv2D object at 0x7f6cc8408e50>, <paddle.fluid.dygraph.nn.Pool2D object at 0x7f6cc8408f40>, <paddle.fluid.dygraph.nn.Conv2D object at 0x7f6c91c403b0>, <paddle.fluid.dygraph.nn.Pool2D object at 0x7f6c91c40400>, <paddle.fluid.dygraph.nn.Conv2D object at 0x7f6c91c40450>, <paddle.fluid.dygraph.nn.Linear object at 0x7f6c91c40310>, <paddle.fluid.dygraph.nn.Linear object at 0x7f6c91c406d0>]\n",
      "conv2d_0 [3, 6, 24, 24] [6, 1, 5, 5] [6]\n",
      "pool2d_0 [3, 6, 12, 12]\n",
      "conv2d_1 [3, 16, 8, 8] [16, 6, 5, 5] [16]\n",
      "pool2d_1 [3, 16, 4, 4]\n",
      "conv2d_2 [3, 120, 1, 1] [120, 16, 4, 4] [120]\n",
      "linear_0 [3, 64] [120, 64] [64]\n",
      "linear_1 [3, 10] [64, 10] [10]\n"
     ]
    }
   ],
   "source": [
    "# 输入数据形状是 [N, 1, H, W]\n",
    "# 这里用np.random创建一个随机数组作为输入数据\n",
    "x = np.random.randn(*[3,1,28,28])\n",
    "x = x.astype('float32')\n",
    "with fluid.dygraph.guard():\n",
    "    # 创建LeNet类的实例，指定模型名称和分类的类别数目\n",
    "    m = LeNet(num_classes=10)\n",
    "    # 通过调用LeNet从基类继承的sublayers()函数，\n",
    "    # 查看LeNet中所包含的子层\n",
    "    print(m.sublayers())\n",
    "    x = fluid.dygraph.to_variable(x)\n",
    "    for item in m.sublayers():\n",
    "        # item是LeNet类中的一个子层\n",
    "        # 查看经过子层之后的输出数据形状\n",
    "        try:\n",
    "            x = item(x)\n",
    "        except:\n",
    "            x = fluid.layers.reshape(x, [x.shape[0], -1])\n",
    "            x = item(x)\n",
    "        if len(item.parameters())==2:\n",
    "            # 查看卷积和全连接层的数据和参数的形状，\n",
    "            # 其中item.parameters()[0]是权重参数w，item.parameters()[1]是偏置参数b\n",
    "            print(item.full_name(), x.shape, item.parameters()[0].shape, item.parameters()[1].shape)\n",
    "        else:\n",
    "            # 池化层没有参数\n",
    "            print(item.full_name(), x.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "start training ... \n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Cache file /home/zhangyiming/.cache/paddle/dataset/mnist/t10k-images-idx3-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/mnist/t10k-images-idx3-ubyte.gz \n",
      "Begin to download\n",
      "\n",
      "Download finished\n",
      "Cache file /home/zhangyiming/.cache/paddle/dataset/mnist/t10k-labels-idx1-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/mnist/t10k-labels-idx1-ubyte.gz \n",
      "Begin to download\n",
      "..\n",
      "Download finished\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0, batch_id: 0, loss is: [2.6400573]\n",
      "epoch: 0, batch_id: 1000, loss is: [2.2969244]\n",
      "epoch: 0, batch_id: 2000, loss is: [2.3344383]\n",
      "epoch: 0, batch_id: 3000, loss is: [2.276091]\n",
      "epoch: 0, batch_id: 4000, loss is: [2.2618005]\n",
      "epoch: 0, batch_id: 5000, loss is: [2.3271308]\n",
      "[validation] accuracy/loss: 0.29079997539520264/2.2582263946533203\n",
      "epoch: 1, batch_id: 0, loss is: [2.243028]\n",
      "epoch: 1, batch_id: 1000, loss is: [2.1969285]\n",
      "epoch: 1, batch_id: 2000, loss is: [2.2286074]\n",
      "epoch: 1, batch_id: 3000, loss is: [1.8787334]\n",
      "epoch: 1, batch_id: 4000, loss is: [1.4441547]\n",
      "epoch: 1, batch_id: 5000, loss is: [1.8379142]\n",
      "[validation] accuracy/loss: 0.6210999488830566/1.2860932350158691\n",
      "epoch: 2, batch_id: 0, loss is: [1.058869]\n",
      "epoch: 2, batch_id: 1000, loss is: [0.90484065]\n",
      "epoch: 2, batch_id: 2000, loss is: [0.81891215]\n",
      "epoch: 2, batch_id: 3000, loss is: [0.5198554]\n",
      "epoch: 2, batch_id: 4000, loss is: [0.47320366]\n",
      "epoch: 2, batch_id: 5000, loss is: [0.86554307]\n",
      "[validation] accuracy/loss: 0.8527999520301819/0.5233003497123718\n",
      "epoch: 3, batch_id: 0, loss is: [0.42588395]\n",
      "epoch: 3, batch_id: 1000, loss is: [0.31154042]\n",
      "epoch: 3, batch_id: 2000, loss is: [0.26407945]\n",
      "epoch: 3, batch_id: 3000, loss is: [0.1915925]\n",
      "epoch: 3, batch_id: 4000, loss is: [0.22699828]\n",
      "epoch: 3, batch_id: 5000, loss is: [0.38284102]\n",
      "[validation] accuracy/loss: 0.9136999249458313/0.31733468174934387\n",
      "epoch: 4, batch_id: 0, loss is: [0.2942542]\n",
      "epoch: 4, batch_id: 1000, loss is: [0.17520982]\n",
      "epoch: 4, batch_id: 2000, loss is: [0.15157521]\n",
      "epoch: 4, batch_id: 3000, loss is: [0.07468078]\n",
      "epoch: 4, batch_id: 4000, loss is: [0.12444584]\n",
      "epoch: 4, batch_id: 5000, loss is: [0.20527256]\n",
      "[validation] accuracy/loss: 0.9333999752998352/0.23463031649589539\n"
     ]
    }
   ],
   "source": [
    "# Recognize handwritten digits with LeNet\n",
    "\n",
    "import os\n",
    "import random\n",
    "import paddle\n",
    "import paddle.fluid as fluid\n",
    "import numpy as np\n",
    "\n",
    "# Define the training procedure\n",
    "def train(model):\n",
    "    print('start training ... ')\n",
    "    # Switch the model to training mode\n",
    "    model.train()\n",
    "    epoch_num = 5\n",
    "    # Momentum optimizer over all model parameters\n",
    "    opt = fluid.optimizer.Momentum(learning_rate=0.001, momentum=0.9, parameter_list=model.parameters())\n",
    "    # Use Paddle's built-in MNIST data readers\n",
    "    train_loader = paddle.batch(paddle.dataset.mnist.train(), batch_size=10)\n",
    "    valid_loader = paddle.batch(paddle.dataset.mnist.test(), batch_size=10)\n",
    "    for epoch in range(epoch_num):\n",
    "        for batch_id, data in enumerate(train_loader()):\n",
    "            # Adjust input shape and dtype: [N, 1, 28, 28] float32 images,\n",
    "            # [N, 1] int64 labels\n",
    "            x_data = np.array([item[0] for item in data], dtype='float32').reshape(-1, 1, 28, 28)\n",
    "            y_data = np.array([item[1] for item in data], dtype='int64').reshape(-1, 1)\n",
    "            # Convert numpy.ndarray to Tensor\n",
    "            img = fluid.dygraph.to_variable(x_data)\n",
    "            label = fluid.dygraph.to_variable(y_data)\n",
    "            # Compute the model output (raw logits)\n",
    "            logits = model(img)\n",
    "            # Compute the loss (softmax + cross-entropy on the logits)\n",
    "            loss = fluid.layers.softmax_with_cross_entropy(logits, label)\n",
    "            avg_loss = fluid.layers.mean(loss)\n",
    "            if batch_id % 1000 == 0:\n",
    "                print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch, batch_id, avg_loss.numpy()))\n",
    "            # Backprop, apply the parameter update, then clear the\n",
    "            # accumulated gradients before the next batch\n",
    "            avg_loss.backward()\n",
    "            opt.minimize(avg_loss)\n",
    "            model.clear_gradients()\n",
    "\n",
    "        # Evaluate on the test set after each epoch\n",
    "        model.eval()\n",
    "        accuracies = []\n",
    "        losses = []\n",
    "        for batch_id, data in enumerate(valid_loader()):\n",
    "            # Adjust input shape and dtype (same layout as training)\n",
    "            x_data = np.array([item[0] for item in data], dtype='float32').reshape(-1, 1, 28, 28)\n",
    "            y_data = np.array([item[1] for item in data], dtype='int64').reshape(-1, 1)\n",
    "            # Convert numpy.ndarray to Tensor\n",
    "            img = fluid.dygraph.to_variable(x_data)\n",
    "            label = fluid.dygraph.to_variable(y_data)\n",
    "            # Compute the model output\n",
    "            logits = model(img)\n",
    "            pred = fluid.layers.softmax(logits)\n",
    "            # Compute per-batch loss and accuracy\n",
    "            loss = fluid.layers.softmax_with_cross_entropy(logits, label)\n",
    "            acc = fluid.layers.accuracy(pred, label)\n",
    "            accuracies.append(acc.numpy())\n",
    "            losses.append(loss.numpy())\n",
    "        print(\"[validation] accuracy/loss: {}/{}\".format(np.mean(accuracies), np.mean(losses)))\n",
    "        # Switch back to training mode for the next epoch\n",
    "        model.train()\n",
    "\n",
    "    # Save the model parameters\n",
    "    fluid.save_dygraph(model.state_dict(), 'mnist')\n",
    "\n",
    "    \n",
    "# Create the model\n",
    "with fluid.dygraph.guard():\n",
    "    model = LeNet(num_classes=10)\n",
    "    # Start the training process\n",
    "    train(model)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.1"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
