{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.5.2\n"
     ]
    }
   ],
   "source": [
    "import paddle\n",
    "import paddle.nn.functional as F\n",
    "from paddle.nn import Linear\n",
    "import numpy as np\n",
    "import os\n",
    "import json \n",
    "import random \n",
    "print(paddle.__version__)\n",
    "from paddle.nn import Conv2D,MaxPool2D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(mode='train'):\n",
    "    with open('mnist.json') as f:\n",
    "        data = json.load(f)\n",
    "    train_set, val_set, eval_set = data\n",
    "    if mode == 'train':\n",
    "        imgs, labels = train_set[0], train_set[1]\n",
    "    elif mode == 'valid':\n",
    "        imgs, labels = val_set[0], val_set[1]\n",
    "    elif mode == 'eval':\n",
    "        imgs, labels = eval_set[0], eval_set[1]\n",
    "    else:\n",
    "        raise Exception(\"mode can only be one of['train', 'valid', 'eval']\")\n",
    "    print('训练数据集数量:', len(imgs))\n",
    "    imgs_length = len(imgs)\n",
    "    index_list = list(range(imgs_length))\n",
    "    BATCHSIZE = 100\n",
    "\n",
    "    def data_generator():\n",
    "        if mode == 'train':\n",
    "            random.shuffle(index_list)\n",
    "        imgs_list = []\n",
    "        labels_list = []\n",
    "        for i in index_list:\n",
    "            # img=np.array(imgs[i]).astype('float32')\n",
    "            img = np.reshape(imgs[i], [1, 28, 28]).astype('float32')\n",
    "            label = np.reshape(labels[i], [1]).astype('int64')\n",
    "            imgs_list.append(img)\n",
    "            labels_list.append(label)\n",
    "            if len(imgs_list) == BATCHSIZE:\n",
    "                yield np.array(imgs_list), np.array(labels_list)\n",
    "                imgs_list = []\n",
    "                labels_list = []\n",
    "        if len(imgs_list) > 0:\n",
    "            yield np.array(imgs_list), np.array(labels_list)\n",
    "\n",
    "    return data_generator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LeNetModel(paddle.nn.Layer):\n",
    "    \"\"\"LeNet-5 style CNN for 28x28 single-channel digit images.\n",
    "\n",
    "    Attribute names (conv1, pool1, conv2, pool2, fc1-fc3) are unchanged so\n",
    "    previously saved state_dicts keep loading.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # Feature extractor: spatial size 28 -> 24 -> 12 -> 8 -> 4.\n",
    "        self.conv1 = Conv2D(in_channels=1, out_channels=6, kernel_size=5, stride=1)\n",
    "        self.pool1 = MaxPool2D(kernel_size=2, stride=2)\n",
    "        self.conv2 = Conv2D(in_channels=6, out_channels=16, kernel_size=5, stride=1)\n",
    "        self.pool2 = MaxPool2D(kernel_size=2, stride=2)\n",
    "        # Classifier head: 16 * 4 * 4 = 256 flattened features -> 10 logits.\n",
    "        self.fc1 = Linear(256, 120)\n",
    "        self.fc2 = Linear(120, 84)\n",
    "        self.fc3 = Linear(84, 10)\n",
    "\n",
    "    def forward(self, x):\n",
    "        \"\"\"Map a (B, 1, 28, 28) batch to (B, 10) unnormalised logits.\"\"\"\n",
    "        features = self.pool1(F.relu(self.conv1(x)))\n",
    "        features = self.pool2(F.relu(self.conv2(features)))\n",
    "        flat = paddle.flatten(features, start_axis=1, stop_axis=-1)\n",
    "        hidden = F.relu(self.fc1(flat))\n",
    "        hidden = F.relu(self.fc2(hidden))\n",
    "        return self.fc3(hidden)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 49,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model):\n",
    "    \"\"\"Train `model` on MNIST with SGD and validate after every epoch.\n",
    "\n",
    "    Prints the running training loss every 200 batches and the mean\n",
    "    validation accuracy/loss per epoch; leaves the model in train mode.\n",
    "    \"\"\"\n",
    "    model.train()\n",
    "\n",
    "    opt = paddle.optimizer.SGD(learning_rate=0.01, parameters=model.parameters())\n",
    "\n",
    "    EPOCH_NUM = 5\n",
    "    train_loader = load_data('train')\n",
    "    valid_loader = load_data('valid')\n",
    "    for epoch_id in range(EPOCH_NUM):\n",
    "\n",
    "        for batch_id, data in enumerate(train_loader()):\n",
    "            images, labels = data\n",
    "            images = paddle.to_tensor(images)\n",
    "            labels = paddle.to_tensor(labels)\n",
    "\n",
    "            predict = model(images)\n",
    "\n",
    "            # Per-sample softmax cross-entropy.  F.cross_entropy with\n",
    "            # reduction='none' matches the deprecated\n",
    "            # F.softmax_with_cross_entropy used before (mean taken below).\n",
    "            loss = F.cross_entropy(predict, labels, reduction='none')\n",
    "\n",
    "            avg_loss = paddle.mean(loss)\n",
    "            if batch_id % 200 == 0:\n",
    "                print(\"epoch: {}, batch_id: {}, loss is: {}\".format(epoch_id, batch_id, avg_loss.numpy()))\n",
    "\n",
    "            avg_loss.backward()\n",
    "            opt.step()\n",
    "            opt.clear_grad()\n",
    "\n",
    "        # Validation pass with train-only behaviour disabled.\n",
    "        model.eval()\n",
    "        accuracies = []\n",
    "        losses = []\n",
    "        for batch_id, data in enumerate(valid_loader()):\n",
    "            images, labels = data\n",
    "            images = paddle.to_tensor(images)\n",
    "            labels = paddle.to_tensor(labels)\n",
    "\n",
    "            logits = model(images)\n",
    "            pred = F.softmax(logits)\n",
    "\n",
    "            loss = F.cross_entropy(logits, labels, reduction='none')\n",
    "\n",
    "            acc = paddle.metric.accuracy(pred, labels)\n",
    "            accuracies.append(acc.numpy())\n",
    "\n",
    "            losses.append(loss.numpy())\n",
    "        print(\"[validation] accuracy/loss: {}/{}\".format(np.mean(accuracies), np.mean(losses)))\n",
    "        # BUG FIX: restore train mode inside the loop — previously this ran\n",
    "        # only after all epochs, so epochs 2..N actually trained in eval mode.\n",
    "        model.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练数据集数量: 50000\n"
     ]
    }
   ],
   "source": [
    "# Build the training-split batch generator (also prints the split size).\n",
    "train_loader = load_data(mode='train')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练数据集数量: 10000\n"
     ]
    }
   ],
   "source": [
    "# Build the validation-split batch generator (also prints the split size).\n",
    "valid_loader = load_data(mode='valid')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练数据集数量: 50000\n",
      "训练数据集数量: 10000\n",
      "epoch: 0, batch_id: 0, loss is: [3.3820574]\n",
      "epoch: 0, batch_id: 200, loss is: [0.5567497]\n",
      "epoch: 0, batch_id: 400, loss is: [0.44647548]\n",
      "[validation] accuracy/loss: 0.9276000261306763/0.24431662261486053\n",
      "epoch: 1, batch_id: 0, loss is: [0.38228038]\n",
      "epoch: 1, batch_id: 200, loss is: [0.1983403]\n",
      "epoch: 1, batch_id: 400, loss is: [0.32270378]\n",
      "[validation] accuracy/loss: 0.9528999328613281/0.16652554273605347\n",
      "epoch: 2, batch_id: 0, loss is: [0.15658173]\n",
      "epoch: 2, batch_id: 200, loss is: [0.15364914]\n",
      "epoch: 2, batch_id: 400, loss is: [0.14992711]\n",
      "[validation] accuracy/loss: 0.9594001173973083/0.14168687164783478\n",
      "epoch: 3, batch_id: 0, loss is: [0.07168043]\n",
      "epoch: 3, batch_id: 200, loss is: [0.20058045]\n",
      "epoch: 3, batch_id: 400, loss is: [0.12742847]\n",
      "[validation] accuracy/loss: 0.9671000838279724/0.11429408937692642\n",
      "epoch: 4, batch_id: 0, loss is: [0.11879345]\n",
      "epoch: 4, batch_id: 200, loss is: [0.08693839]\n",
      "epoch: 4, batch_id: 400, loss is: [0.08274473]\n",
      "[validation] accuracy/loss: 0.9688999652862549/0.11161471903324127\n"
     ]
    }
   ],
   "source": [
    "# Instantiate a fresh LeNet and run the full training/validation loop;\n",
    "# `model` stays bound for the save cell below.\n",
    "model = LeNetModel()\n",
    "train(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 53,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the trained weights for the inference cells below.\n",
    "paddle.save(model.state_dict(), 'mnist-cnn.pdparams')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "# Load a handwritten digit, grayscale it ('L') and shrink to the 28x28\n",
    "# input size the network expects.  Image.LANCZOS replaces Image.ANTIALIAS,\n",
    "# which was removed in Pillow 10 (both name the same resampling filter).\n",
    "im = Image.open('0.jpg').convert('L')\n",
    "im = im.resize((28, 28), Image.LANCZOS)\n",
    "# Single-image NCHW float batch.\n",
    "img = np.array(im).reshape(1, 1, 28, 28).astype('float32')\n",
    "# Invert and scale to [0, 1]: MNIST digits are light strokes on a dark\n",
    "# background, while photos/scans are usually the opposite.\n",
    "img = 1.0 - img / 255."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAJIAAACPCAYAAAARM4LLAAAABHNCSVQICAgIfAhkiAAAAAlwSFlzAAALEgAACxIB0t1+/AAAADl0RVh0U29mdHdhcmUAbWF0cGxvdGxpYiB2ZXJzaW9uIDIuMi4zLCBodHRwOi8vbWF0cGxvdGxpYi5vcmcvIxREBQAAC5NJREFUeJztnWtoFdsVx//LaFSsr9RXTIJXfFbFZ6yvgoUasD64+qFgxCJ4xS8ttNgP9VpBUMSgIvihiBeUW7He+oRc8HGtV6UpaEnER2JFE4s2wbdBa/CBj90POZ671zKZOXNmZ2YS1w/Cmf/MnLP3CevsvWbvtdcmYwwUJSyd4q6A0jFQQ1KcoIakOEENSXGCGpLiBDUkxQlqSIoT1JAUJ4QyJCKaS0Q3iKiOiNa4qpTS/qBsR7aJKAfATQAlABoAVAIoNcb8u7X35OXlmcLCwqzKU+Khurr6sTGmv999nUOU8VMAdcaY/wAAEf0NwOcAWjWkwsJCHD9+PESRbYf8QUndqVMnz+s2ROSuYi3w/v17pr3qFrYuRUVFdzK5L0zXVgCg3tINqXMMIlpFRFVEVNXY2BiiOCXJhDGklkz9o5+pMeYrY0yxMaY4Ly8vRHFKkgnTtTUAKLJ0IYC7Xm/Izc1FR/GR7O5Fdh9t3bUlkTAtUiWAEUQ0lIhyASwB8K2baintjaxbJGPMWyL6LYDvAOQA2GOMueasZkq7IkzXBmPMcQDJfAxTIiWUIWVDW0Vkys+Vfop8ZJa6c2f+r/Dzc+QjdxBk2RJZtt9387seBTpFojhBDUlxghqS4oTIfSSv/ttlXy/fK32anJwcpqXfcv78eaYvX77M9Lt371otq1+/fkxPnDiR6dGjRzPt5zf6jVN5+UxR+UvaIilOUENSnKCGpDghch8pCEF8Jj9fQF5vampi+vTp00xv376d6YqKCqZtP+jZs2fsmvS/SktLmd64cSPTgwcP9qyrX4hLEub2tEVSnKCGpDghUV2bX5PuhbxXPs7fucMD/Y4cOcL0tm3bmO7SpQvTK1euZHrBggXp4/r6enbt4MGDTMtus1u3bkwvW7aM6WnTpjHtN90TdHqnLdAWSXGCGpLiBDUkxQmJ8pGCPNb6raRoaGhguqysjOm9e/cyLePJ165dy/SKFSuYln6OTVFREdNbt25leufOnUw/evSIaTn0ID/PD50iUdotakiKE9SQFCfE6iOFCbuVPpKc8igvL2f68OHDTPft25dp6QOtWrWKaa+wE+mHyDGonj17Mi3HfWpqapg+efIk00uXLmXayz+LC22RFCeoISlOUENSnBCrjxR0jMP2S6SfcebMGab379/PtPShFi1axPS6deuY9ltuZF+X/tq8efOYfvXqFdP3799n+to1vq5UjjPJMa6SkhKme/XqxbQdBix9u7YaV9IWSXGCGpLiBDUkxQmJmmuz+3Yg2NjNxYsXmZZ+R3FxMdMLFy5kumvXroHq6rXsWl6zY5eAj0Nrd+3axfSBAweYlv6b/O7yu9j+o/yfSt/PVUoebZEUJ/gaEhHtIaKHRFRjncsjor8TUW3qta/XZygdn0xapK8BzBXn1gD43hgzAsD3Ka18wvj6SMaYfxDRZ+L05wB+njr+C4BzAP4YtjLSJ5Jzcfb1169fs2t1dXVM9+jRg+klS5YwPX/+fKalXyPLDhMXnZuby7SMyR45ciTT+fn5TG/evJnpTZs2MT1gwACmZ82alT6W9W4rsvWRBhpj7gFA6nWAz/1KB6fNnW07PbKMBFQ6Dtka0gMiygeA1OvD1m600yP37++bQF5pp2TbgX4LYDmAstRrufftPxAmnti+//bt2+ya9JGk3zBu3Dimpd/iFwMuse8PsitAS2XJubTly5cz/fTpU6YPHTrE9O7du5kePnx4+njgwIHsWmxzbUT0DYDzAEYRUQMRfYFmAyoholo070VS5vUZSscnk6e2
0lYu/cJxXZR2jI5sK06INfVf0JTG9jiS3GXpypUrTMtxIznX5le2Xx4CLx/Kz0fy86lGjRrF9OrVq5m+dOkS0zKPgR0zLtfU9e7dm2m/ubhM0RZJcYIakuIENSTFCbFuIRF0K4QXL16kj6VP9ObNG6ZlimKvuGbA3zcIMv4S1M/wy+00ZMgQptevX8/0mjV8zvzmzZvp47dv33qW7WpcSVskxQlqSIoTErVk26+rs7uj58+fs2sy9cukSZMC1SXMjkOyK/LrLoJuvCyXgE+fPt1T20u+5VCBHWICAN27d/csO1O0RVKcoIakOEENSXFCoqZIgkwt+IViyPCJOAkTLtMS0qcqKChg+u7dHzY7l2kEZUzYhAkTmFYfSYkVNSTFCWpIihPa1S7btl8kpzj8doz0q4dfqK3XuFLQKZGw0xJy7Gf27NlM22ElJ06cYNdkikPpI+mSbSVW1JAUJ6ghKU5I9C7bEtsXkUuR5ZYRtbW1TM+ZMyfTKgYm6HhY0FQyfnNvU6ZMYXrGjBnp46tXr7JrMg2hK7RFUpyghqQ4QQ1JcUKiUv9J5NiOvcxaxh89fvyYaTvcNBNchtr6xRsF3RHcL95JLum+detW+lhuXyFDjiU616bEihqS4gQ1JMUJiVqO5KftsaPJkyeza3L+6cGDB0w3NjYyLeOXgi5PCvI9/Ob1vFIctlQXmfZQbgd/9uzZ9LFcql5YWAgvdK5NiZVM8iMVEdFZIrpORNeI6Hep85oiWUmTSYv0FsAfjDE/ATAdwG+IaAw0RbJikUmirXsAPmSwfU5E1wEUIMsUyV4x2173SmSccp8+fZi+cOEC0+fOnWNabusgUwH6EcRHCor0oewYbID7QACwY8cOpocOHZo+Xrx4Mbsm/2+SSMaRUvm2JwH4FzRFsmKRsSER0Y8AHAHwe2PM/wK8T9MjfwJkZEhE1AXNRvRXY8zR1OmMUiRreuRPA18fiZo7/N0Arhtj7EVSWadItj470P12/y3X9st4o6NHjzJdVsYT7z558oRp6TPJtDgy/slOFyPHee7du8f0y5cvmX74kP/m5FyZTPV86tQppuUWYmPHjmXa3mp+7ly+jYzc6j1o3oLWyGRAchaAXwOoJqLLqXNr0WxAB1Ppkv8L4FdZ1UDpEGTy1PZPAK2ZqaZIVgDoyLbiiESva/O6V/owtl8AfOyXlJdzF27Lli1M19fXMy1zDslxKjv+SfoVx44dY1rGk0sfR26HIf0Y+V1nzpzJ9IYNG5i20yuHWUcYBG2RFCeoISlOUENSnEBR9aEAUFxcbCorK9Pab3t2r9hlvxyP1dXVTMv5qIqKCqZlzLes26BBg1q9X9ZFbv0ux5nkwKyMP5dr+eUY2bBhw5iW267a5QVdM9fCvOFFYwwPamoBbZEUJ6ghKU6ItWvza3a9hu/9wlklcsK4qqqK6X379jFtpxgGgKlTpzJtP2LL7Ppjxoxhevz48UzLcFe5REjuYOTX5QdZBp9FGkLt2pToUENSnKCGpDghch9J+iZRIb+nXNIjQzmampqYltMWtvZ7/JfvlWlpwoTTZPP+IKiPpESKGpLiBDUkxQmJTmvjEulHSL9FToEkmbb0ibJFWyTFCWpIihPUkBQnfDI+ksRv/CzMcvIw97ZXtEVSnKCGpDhBDUlxQqRzbUT0CMAdAP0APPa5PS6SWre46jXEGOObtCFSQ0oXSlSVyURgHCS1bkmt1we0a1OcoIakOCEuQ/oqpnIzIal1S2q9AMTkIykdD+3aFCdEakhENJeIbhBRHRHFmk6ZiPYQ0UMiqrHOJSJ3eHvMbR6ZIRFRDoA/A/glgDEASlP5uuPiawBzxbmk5A5vf7nNjTGR/AGYAeA7S38J4Muoym+lTp8BqLH0DQD5qeN8ADfirJ9Vr3IAJUmtnzEm0q6tAICdzaohdS5JJC53eHvJbR6lIbUUS6GPjB5km9s8DqI0pAYAdv6WQgB3W7k3LjLKHR4FYXKbx0GUhlQJYAQR
DSWiXABL0JyrO0l8yB0OZJk73AUZ5DYHYqxfi0TsNM4DcBPALQB/itmB/QbNm/W8QXNr+QWAH6P5aag29ZoXU91+huZu/yqAy6m/eUmpX0t/OrKtOEFHthUnqCEpTlBDUpyghqQ4QQ1JcYIakuIENSTFCWpIihP+DyUTE/Sy4S7uAAAAAElFTkSuQmCC\n",
      "text/plain": [
       "<Figure size 144x144 with 1 Axes>"
      ]
     },
     "metadata": {
      "needs_background": "light"
     },
     "output_type": "display_data"
    }
   ],
   "source": [
    "# Preview the preprocessed input exactly as the model will see it.\n",
    "fig, ax = plt.subplots(figsize=(2, 2))\n",
    "ax.imshow(img[0][0], cmap=plt.cm.binary)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "本次预测的数字是： 0\n"
     ]
    }
   ],
   "source": [
    "# Rebuild the network and load the weights saved after training.\n",
    "model = LeNetModel()\n",
    "\n",
    "params_file_path = 'mnist-cnn.pdparams'\n",
    "param_dict = paddle.load(params_file_path)\n",
    "model.load_dict(param_dict)\n",
    "\n",
    "# Inference only: eval mode disables any train-only behaviour.\n",
    "model.eval()\n",
    "tensor_img = img\n",
    "\n",
    "results = model(paddle.to_tensor(tensor_img))\n",
    "\n",
    "# The prediction is the index of the largest logit; softmax is monotonic,\n",
    "# so argmax on raw logits gives the same digit (replaces argsort[0][-1]).\n",
    "lab = np.argmax(results.numpy()[0])\n",
    "print(\"本次预测的数字是：\", lab)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
