{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# Learning Paddle\n",
    "## Part 1 数据获取, 加载与预处理\n",
    "### 1. 调用内置数据集与自定义数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['DatasetFolder', 'ImageFolder', 'MNIST', 'FashionMNIST', 'Flowers', 'Cifar10', 'Cifar100', 'VOC2012']\n",
      "['Conll05st', 'Imdb', 'Imikolov', 'Movielens', 'UCIHousing', 'WMT14', 'WMT16']\n"
     ]
    }
   ],
   "source": [
    "import paddle\r\n",
    "print(paddle.vision.datasets.__all__)\r\n",
    "print(paddle.text.datasets.__all__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
     "from paddle.vision.transforms import ToTensor\r\n",
     "train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=ToTensor())\r\n",
     "test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=ToTensor())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=============custom dataset=============\n",
      "[28, 28] [1]\n"
     ]
    }
   ],
   "source": [
    "import paddle\r\n",
    "from paddle.io import Dataset\r\n",
    "\r\n",
    "BATCH_SIZE = 64\r\n",
    "BATCH_NUM = 20\r\n",
    "\r\n",
    "IMAGE_SIZE = (28, 28)\r\n",
    "CLASS_NUM = 10\r\n",
    "\r\n",
    "\r\n",
    "class MyDataset(Dataset):\r\n",
    "    \"\"\"\r\n",
    "    步骤一：继承paddle.io.Dataset类\r\n",
    "    \"\"\"\r\n",
    "    def __init__(self, num_samples=42):\r\n",
    "        \"\"\"\r\n",
    "        步骤二：实现构造函数，定义数据集大小\r\n",
    "        \"\"\"\r\n",
    "        super(MyDataset, self).__init__()\r\n",
    "        self.num_samples = num_samples\r\n",
    "\r\n",
    "    def __getitem__(self, index):\r\n",
    "        \"\"\"\r\n",
    "        步骤三：实现__getitem__方法，定义指定index时如何获取数据，并返回单条数据（训练数据，对应的标签）\r\n",
    "        @dandelight: 如果自己写, 读取数据的方法在这里实现.\r\n",
    "        \"\"\"\r\n",
    "        data = paddle.uniform(IMAGE_SIZE, dtype='float32')\r\n",
     "        label = paddle.randint(0, CLASS_NUM, dtype='int64')  # high 是开区间, 写 CLASS_NUM-1 会漏掉类别 9\r\n",
    "\r\n",
    "        return data, label\r\n",
    "\r\n",
    "    def __len__(self):\r\n",
    "        \"\"\"\r\n",
    "        步骤四：实现__len__方法，返回数据集总数目\r\n",
    "        \"\"\"\r\n",
    "        return self.num_samples\r\n",
    "        # return 42  # The Answer to The Ultimate Question of Life, The Universe, and Everything.\r\n",
    "\r\n",
    "# 测试定义的数据集\r\n",
    "custom_dataset = MyDataset(BATCH_SIZE * BATCH_NUM)\r\n",
    "\r\n",
    "print('=============custom dataset=============')\r\n",
    "for data, label in custom_dataset:\r\n",
    "    print(data.shape, label.shape)\r\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[64, 28, 28]\n",
      "[64, 1]\n"
     ]
    }
   ],
   "source": [
    "train_loader = paddle.io.DataLoader(custom_dataset, batch_size=BATCH_SIZE, shuffle=True)\r\n",
    "# 如果要加载内置数据集，将 custom_dataset 换为 train_dataset 即可\r\n",
    "for batch_id, data in enumerate(train_loader()):\r\n",
    "    x_data = data[0]\r\n",
    "    y_data = data[1]\r\n",
    "\r\n",
    "    print(x_data.shape)\r\n",
    "    print(y_data.shape)\r\n",
    "    break"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据处理方法:  ['BaseTransform', 'Compose', 'Resize', 'RandomResizedCrop', 'CenterCrop', 'RandomHorizontalFlip', 'RandomVerticalFlip', 'Transpose', 'Normalize', 'BrightnessTransform', 'SaturationTransform', 'ContrastTransform', 'HueTransform', 'ColorJitter', 'RandomCrop', 'Pad', 'RandomRotation', 'Grayscale', 'ToTensor', 'to_tensor', 'hflip', 'vflip', 'resize', 'pad', 'rotate', 'to_grayscale', 'crop', 'center_crop', 'adjust_brightness', 'adjust_contrast', 'adjust_hue', 'normalize']\n"
     ]
    }
   ],
   "source": [
    "print('数据处理方法: ', paddle.vision.transforms.__all__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "from paddle.vision.transforms import Compose, Resize, ColorJitter\r\n",
    "\r\n",
    "# 定义想要使用的数据增强方式，这里包括随机调整亮度、对比度和饱和度，改变图片大小\r\n",
    "transform = Compose([ColorJitter(), Resize(size=32)])\r\n",
    "\r\n",
    "# 通过transform参数传递定义好的数据增强方法即可完成对自带数据集的增强\r\n",
    "# train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=transform)\r\n",
    "# 会影响下一步"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=============custom dataset=============\n",
      "(32, 32) [1]\n"
     ]
    }
   ],
   "source": [
    "import paddle\r\n",
    "from paddle.io import Dataset\r\n",
    "from paddle.vision.transforms import Compose, Resize\r\n",
    "\r\n",
    "BATCH_SIZE = 64\r\n",
    "BATCH_NUM = 20\r\n",
    "\r\n",
    "IMAGE_SIZE = (28, 28)\r\n",
    "CLASS_NUM = 10\r\n",
    "\r\n",
    "class MyDataset(Dataset):\r\n",
    "    def __init__(self, num_samples=42):\r\n",
    "        super(MyDataset, self).__init__()\r\n",
    "        self.num_samples = num_samples\r\n",
    "        # 在 `__init__` 中定义数据增强方法，此处为调整图像大小\r\n",
    "        self.transform = Compose([Resize(size=32)])\r\n",
    "\r\n",
    "    def __getitem__(self, index):\r\n",
    "        data = paddle.uniform(IMAGE_SIZE, dtype='float32')\r\n",
    "        # 在 `__getitem__` 中对数据集使用数据增强方法\r\n",
    "        # 所谓数据增强\r\n",
    "        data = self.transform(data.numpy())\r\n",
    "\r\n",
     "        label = paddle.randint(0, CLASS_NUM, dtype='int64')  # high 是开区间, 写 CLASS_NUM-1 会漏掉类别 9\r\n",
    "\r\n",
    "        return data, label\r\n",
    "\r\n",
    "    def __len__(self):\r\n",
    "        return self.num_samples\r\n",
    "\r\n",
    "# 测试定义的数据集\r\n",
    "custom_dataset = MyDataset(BATCH_SIZE * BATCH_NUM)\r\n",
    "\r\n",
    "print('=============custom dataset=============')\r\n",
    "for data, label in custom_dataset:\r\n",
    "    print(data.shape, label.shape)\r\n",
    "    break\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "可以看出，输出的形状从 $(28, 28)$ 变为了 $(32, 32)$，证明完成了图像的大小调整。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# print(paddle.randint.__doc__)\r\n",
    "# help(paddle.randint)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## Part 2 搞到了数据, 开始模型组网"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "飞桨框架2.0中，组网相关的API都在`paddle.nn`目录下，你可以通过 `Sequential` 或 `SubClass` 的方式构建具体的模型。组网相关的API类别与具体的API列表如下表：\n",
    "\n",
    "| 功能         | API名称                                                      |\n",
    "| ------------ | ------------------------------------------------------------ |\n",
    "| Conv         | Conv1D、Conv2D、Conv3D、Conv1DTranspose、Conv2DTranspose、Conv3DTranspose |\n",
    "| Pool         | AdaptiveAvgPool1D、AdaptiveAvgPool2D、AdaptiveAvgPool3D、 AdaptiveMaxPool1D、AdaptiveMaxPool2D、AdaptiveMaxPool3D、 AvgPool1D、AvgPool2D、AvgPool3D、MaxPool1D、MaxPool2D、MaxPool3D |\n",
    "| Padding      | Pad1D、Pad2D、Pad3d                                          |\n",
    "| Activation   | ELU、GELU、Hardshrink、Hardtanh、HSigmoid、LeakyReLU、LogSigmoid、 LogSoftmax、PReLU、ReLU、ReLU6、SELU、Sigmoid、Softmax、Softplus、 Softshrink、Softsign、Tanh、Tanhshrink |\n",
    "| Normlization | BatchNorm、BatchNorm1D、BatchNorm2D、BatchNorm3D、GroupNorm、 InstanceNorm1D、InstanceNorm2D、InstanceNorm3D、LayerNorm、SpectralNorm、 SyncBatchNorm |\n",
    "| Recurrent NN | BiRNN、GRU、GRUCell、LSTM、LSTMCell、RNN、RNNCellBase、SimpleRNN、 SimpleRNNCell |\n",
    "| Transformer  | Transformer、TransformerDecoder、TransformerDecoderLayer、 TransformerEncoder、TransformerEncoderLayer |\n",
    "| Dropout      | AlphaDropout、Dropout、Dropout2d、Dropout3d                  |\n",
    "| Loss         | BCELoss、BCEWithLogitsLoss、CrossEntropyLoss、CTCLoss、KLDivLoss、L1Loss MarginRankingLoss、MSELoss、NLLLoss、SmoothL1Loss |"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "### `Sequential`组网\n",
    "\n",
    "针对顺序的线性网络结构你可以直接使用`Sequential`来快速完成组网，可以减少类的定义等代码编写。(类似于`TensorFlow`中的`tf.keras.Sequential`) [API文档](https://www.paddlepaddle.org.cn/documentation/docs/zh/api/paddle/fluid/dygraph/container/Sequential_cn.html)\n",
    "\n",
    "和`keras`不同的是, 这次把网络全当成`Sequential`的参数.\n",
    "\n",
     "（问题: `TensorFlow` 里能这样吗?）"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import paddle\r\n",
    "# Sequential形式组网\r\n",
    "mnist = paddle.nn.Sequential(\r\n",
    "    paddle.nn.Flatten(1, -1),\r\n",
    "    paddle.nn.Linear(784, 512),\r\n",
    "    paddle.nn.ReLU(),\r\n",
    "    paddle.nn.Dropout(0.2),\r\n",
    "    paddle.nn.Linear(512, 10)\r\n",
    ")\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "针对一些比较复杂的网络结构，就可以使用`Layer`子类定义的方式来进行模型代码编写，在`__init__`构造函数中进行组网`Layer`的声明，在`forward`中使用声明的`Layer`变量进行前向计算。子类组网方式也可以实现`sublayer`的复用，针对相同的`layer`可以在构造函数中一次性定义，在`forward`中多次调用。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Layer类继承方式组网\r\n",
    "class Mnist(paddle.nn.Layer):\r\n",
    "    # 在__init__中进行Layer声明(但是没有调用, 要在下面的forward函数中真正起作用.)\r\n",
    "    def __init__(self):\r\n",
    "        super(Mnist, self).__init__()\r\n",
    "\r\n",
    "        self.flatten = paddle.nn.Flatten()\r\n",
    "        self.linear_1 = paddle.nn.Linear(784, 512)\r\n",
    "        self.linear_2 = paddle.nn.Linear(512, 10)\r\n",
    "        self.relu = paddle.nn.ReLU()\r\n",
    "        self.dropout = paddle.nn.Dropout(0.2)\r\n",
    "    # forward相当于回调函数, 实现网络的逻辑(姑且用这词)\r\n",
    "    # 注意linear_1和linear_2在上下的顺序.\r\n",
    "    def forward(self, inputs):\r\n",
    "        y = self.flatten(inputs)\r\n",
    "        y = self.linear_1(y)\r\n",
    "        y = self.relu(y)\r\n",
    "        y = self.dropout(y)\r\n",
    "        y = self.linear_2(y)\r\n",
    "\r\n",
    "        return y\r\n",
    "\r\n",
    "mnist_2 = Mnist()\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "~~当然搞到代码最快速的方式是用别人的代码~~, 飞桨框架内置了一系列模型\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "飞桨框架内置模型： ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101', 'resnet152', 'VGG', 'vgg11', 'vgg13', 'vgg16', 'vgg19', 'MobileNetV1', 'mobilenet_v1', 'MobileNetV2', 'mobilenet_v2', 'LeNet']\n"
     ]
    }
   ],
   "source": [
    "print('飞桨框架内置模型：', paddle.vision.models.__all__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "lenet = paddle.vision.models.LeNet() #  用就是了, It's free."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "---------------------------------------------------------------------------\n",
      " Layer (type)       Input Shape          Output Shape         Param #    \n",
      "===========================================================================\n",
      "   Conv2D-3      [[64, 1, 28, 28]]     [64, 6, 28, 28]          60       \n",
      "    ReLU-7       [[64, 6, 28, 28]]     [64, 6, 28, 28]           0       \n",
      "  MaxPool2D-3    [[64, 6, 28, 28]]     [64, 6, 14, 14]           0       \n",
      "   Conv2D-4      [[64, 6, 14, 14]]     [64, 16, 10, 10]        2,416     \n",
      "    ReLU-8       [[64, 16, 10, 10]]    [64, 16, 10, 10]          0       \n",
      "  MaxPool2D-4    [[64, 16, 10, 10]]     [64, 16, 5, 5]           0       \n",
      "   Linear-12        [[64, 400]]           [64, 120]           48,120     \n",
      "   Linear-13        [[64, 120]]            [64, 84]           10,164     \n",
      "   Linear-14         [[64, 84]]            [64, 10]             850      \n",
      "===========================================================================\n",
      "Total params: 61,610\n",
      "Trainable params: 61,610\n",
      "Non-trainable params: 0\n",
      "---------------------------------------------------------------------------\n",
      "Input size (MB): 0.19\n",
      "Forward/backward pass size (MB): 7.03\n",
      "Params size (MB): 0.24\n",
      "Estimated Total Size (MB): 7.46\n",
      "---------------------------------------------------------------------------\n",
      "\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'total_params': 61610, 'trainable_params': 61610}"
      ]
     },
     "execution_count": null,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "paddle.summary(lenet, (64, 1, 28, 28)) \r\n",
    "# 还可以通过paddle.summary查看模型的结构和每一层的输入输出形状"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "<class 'paddle.vision.models.lenet.LeNet'>\n"
     ]
    }
   ],
   "source": [
    "print(type(lenet))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "~~原料备齐炼丹炉也搭好了，下一步就是开始炼丹了~~\n",
    "\n",
    "## Part 3: 训练与预测\n",
    "\n",
    "在完成数据预处理，数据加载与模型的组建后，你就可以进行模型的训练与预测了。飞桨框架提供了以下两种训练与预测的方法：\n",
     "* 一种是用`paddle.Model`对模型进行封装，通过高层API如`Model.fit()`、`Model.evaluate()`、`Model.predict()`等完成模型的训练与预测\n",
    "* 另一种就是基于基础API常规的训练方式。\n",
    "\n",
    "高层API实现的模型训练与预测如`Model.fit()`、`Model.evaluate()`、`Model.predict()`都可以通过基础API实现"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Cache file /home/aistudio/.cache/paddle/dataset/mnist/train-images-idx3-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/mnist/train-images-idx3-ubyte.gz \n",
      "Begin to download\n",
      "\n",
      "Download finished\n",
      "Cache file /home/aistudio/.cache/paddle/dataset/mnist/train-labels-idx1-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/mnist/train-labels-idx1-ubyte.gz \n",
      "Begin to download\n",
      "........\n",
      "Download finished\n",
      "Cache file /home/aistudio/.cache/paddle/dataset/mnist/t10k-images-idx3-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/mnist/t10k-images-idx3-ubyte.gz \n",
      "Begin to download\n",
      "\n",
      "Download finished\n",
      "Cache file /home/aistudio/.cache/paddle/dataset/mnist/t10k-labels-idx1-ubyte.gz not found, downloading https://dataset.bj.bcebos.com/mnist/t10k-labels-idx1-ubyte.gz \n",
      "Begin to download\n",
      "..\n",
      "Download finished\n"
     ]
    }
   ],
   "source": [
    "# 上边写得太乱了，重新写一下\r\n",
    "import paddle\r\n",
    "from paddle.vision.transforms import ToTensor\r\n",
    "\r\n",
    "# 加载数据集\r\n",
    "train_dataset = paddle.vision.datasets.MNIST(mode='train', transform=ToTensor())\r\n",
    "test_dataset = paddle.vision.datasets.MNIST(mode='test', transform=ToTensor())\r\n",
    "\r\n",
    "# 定义网络结构\r\n",
    "mnist = paddle.nn.Sequential(\r\n",
    "    paddle.nn.Flatten(1, -1),\r\n",
    "    paddle.nn.Linear(784, 512),\r\n",
    "    paddle.nn.ReLU(),\r\n",
    "    paddle.nn.Dropout(0.2),\r\n",
    "    paddle.nn.Linear(512, 10)\r\n",
    ")\r\n",
    "\r\n",
    "model = paddle.Model(mnist)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# 为模型训练做准备，设置优化器(Adam)，损失函数(CEL)和精度计算方式(准确率评估器)\r\n",
    "model.prepare(optimizer=paddle.optimizer.Adam(parameters=model.parameters()),\r\n",
    "              loss=paddle.nn.CrossEntropyLoss(),\r\n",
    "              metrics=paddle.metric.Accuracy())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "The loss value printed in the log is the current step, and the metric is the average value of previous step.\n",
      "Epoch 1/5\n",
      "step  10/938 [..............................] - loss: 1.0523 - acc: 0.5062 - ETA: 34s - 37ms/step"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/envs/python35-paddle120-env/lib/python3.7/site-packages/paddle/fluid/layers/utils.py:77: DeprecationWarning: Using or importing the ABCs from 'collections' instead of from 'collections.abc' is deprecated, and in 3.8 it will stop working\n",
      "  return (isinstance(seq, collections.Sequence) and\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "step 938/938 [==============================] - loss: 0.1410 - acc: 0.9295 - 22ms/step         \n",
      "Epoch 2/5\n",
      "step 938/938 [==============================] - loss: 0.0421 - acc: 0.9690 - 22ms/step        \n",
      "Epoch 3/5\n",
      "step 938/938 [==============================] - loss: 0.0567 - acc: 0.9776 - 22ms/step        \n",
      "Epoch 4/5\n",
      "step 938/938 [==============================] - loss: 0.0064 - acc: 0.9827 - 23ms/step         \n",
      "Epoch 5/5\n",
      "step 938/938 [==============================] - loss: 0.0142 - acc: 0.9861 - 22ms/step        \n"
     ]
    }
   ],
   "source": [
    "# 启动模型训练，指定训练数据集，设置训练轮次，设置每次数据集计算的批次大小，设置日志格式\r\n",
    "model.fit(train_dataset,\r\n",
    "          epochs=5,\r\n",
    "          batch_size=64,\r\n",
    "          verbose=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Eval begin...\n",
      "The loss value printed in the log is the current batch, and the metric is the average value of previous step.\n",
      "step 10000/10000 [==============================] - loss: 6.5565e-06 - acc: 0.9808 - 2ms/step         \n",
      "Eval samples: 10000\n"
     ]
    }
   ],
   "source": [
    "# 用 evaluate 在测试集上对模型进行验证\r\n",
    "eval_result = model.evaluate(test_dataset, verbose=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Predict begin...\n",
      "step 10000/10000 [==============================] - 2ms/step        \n",
      "Predict samples: 10000\n"
     ]
    }
   ],
   "source": [
    "# 用 predict 在测试集上对模型进行测试\r\n",
    "test_result = model.predict(test_dataset)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "`evaluate`和`predict`有什么区别呢?"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "随后, 我们来深入`paddle`的底层API\n",
    "\n",
     "首先是拆解 `Model.prepare()`、`Model.fit()` —— 用基础API训练模型\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0, batch_id: 900, loss is: [0.07276471], acc is: [0.96875]\n",
      "epoch: 1, batch_id: 900, loss is: [0.01826966], acc is: [1.]\n",
      "epoch: 2, batch_id: 900, loss is: [0.02491308], acc is: [0.984375]\n",
      "epoch: 3, batch_id: 900, loss is: [0.02067329], acc is: [1.]\n",
      "epoch: 4, batch_id: 900, loss is: [0.08238722], acc is: [0.96875]\n"
     ]
    }
   ],
   "source": [
    "# dataset与mnist的定义与第一部分内容一致\r\n",
    "\r\n",
    "# 用 DataLoader 实现数据加载\r\n",
    "train_loader = paddle.io.DataLoader(train_dataset, batch_size=64, shuffle=True)\r\n",
    "\r\n",
    "mnist.train()\r\n",
    "\r\n",
    "# 设置迭代次数\r\n",
    "epochs = 5\r\n",
    "\r\n",
    "# 设置优化器\r\n",
    "optim = paddle.optimizer.Adam(parameters=mnist.parameters())\r\n",
    "# 设置损失函数\r\n",
    "loss_fn = paddle.nn.CrossEntropyLoss()\r\n",
    "\r\n",
    "for epoch in range(epochs):\r\n",
    "    for batch_id, data in enumerate(train_loader()):\r\n",
    "\r\n",
    "        x_data = data[0]            # 训练数据\r\n",
    "        y_data = data[1]            # 训练数据标签\r\n",
    "        predicts = mnist(x_data)    # 预测结果\r\n",
    "\r\n",
    "        # 计算损失 等价于 prepare 中loss的设置\r\n",
    "        loss = loss_fn(predicts, y_data)\r\n",
    "\r\n",
    "        # 计算准确率 等价于 prepare 中metrics的设置\r\n",
    "        acc = paddle.metric.accuracy(predicts, y_data)\r\n",
    "\r\n",
    "        # 下面的反向传播、打印训练信息、更新参数、梯度清零都被封装到 Model.fit() 中\r\n",
    "\r\n",
    "        # 反向传播\r\n",
    "        loss.backward()\r\n",
    "\r\n",
    "        if (batch_id+1) % 900 == 0:\r\n",
    "            print(\"epoch: {}, batch_id: {}, loss is: {}, acc is: {}\".format(epoch, batch_id+1, loss.numpy(), acc.numpy()))\r\n",
    "\r\n",
    "        # 更新参数\r\n",
    "        optim.step()\r\n",
    "\r\n",
    "        # 梯度清零\r\n",
    "        optim.clear_grad()\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
     "随后是模型的验证: 用基础API实现 `Model.evaluate()`"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "batch_id: 30, loss is: [0.15280068], acc is: [0.96875]\n",
      "batch_id: 60, loss is: [0.154762], acc is: [0.96875]\n",
      "batch_id: 90, loss is: [0.05788707], acc is: [0.984375]\n",
      "batch_id: 120, loss is: [0.00046292], acc is: [1.]\n",
      "batch_id: 150, loss is: [0.13221826], acc is: [0.984375]\n"
     ]
    }
   ],
   "source": [
    "# 加载测试数据集\r\n",
    "test_loader = paddle.io.DataLoader(test_dataset, batch_size=64, drop_last=True)\r\n",
    "loss_fn = paddle.nn.CrossEntropyLoss()\r\n",
    "\r\n",
    "mnist.eval()\r\n",
    "\r\n",
    "for batch_id, data in enumerate(test_loader()):\r\n",
    "\r\n",
    "    x_data = data[0]            # 测试数据\r\n",
    "    y_data = data[1]            # 测试数据标签\r\n",
    "    predicts = mnist(x_data)    # 预测结果\r\n",
    "\r\n",
    "    # 计算损失与精度\r\n",
    "    loss = loss_fn(predicts, y_data)\r\n",
    "    acc = paddle.metric.accuracy(predicts, y_data)\r\n",
    "\r\n",
    "    # 打印信息\r\n",
    "    if (batch_id+1) % 30 == 0:\r\n",
    "        print(\"batch_id: {}, loss is: {}, acc is: {}\".format(batch_id+1, loss.numpy(), acc.numpy()))\r\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "用基础API实现`Model.predict()` 测试模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "predict finished\n"
     ]
    }
   ],
   "source": [
    "# 加载测试数据集\r\n",
    "test_loader = paddle.io.DataLoader(test_dataset, batch_size=64, drop_last=True)\r\n",
    "\r\n",
    "mnist.eval()\r\n",
    "for batch_id, data in enumerate(test_loader()):\r\n",
    "    x_data = data[0]\r\n",
    "    predicts = mnist(x_data)\r\n",
    "    # 获取预测结果\r\n",
    "print(\"predict finished\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "到此，在`paddlepaddle`上实现了对经典的`MNIST`数据集进行学习与预测的神经网络；随后，我们将学习~~更高级的炼丹技术~~，诸如：\n",
    "* 资源配置\n",
    "* 自定义指标\n",
    "* 模型存储与载入\n",
    "* 模型导出`ONNX`协议"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 2.0.0b0 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
