{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.5.2\n"
     ]
    }
   ],
   "source": [
    "import paddle\n",
    "import paddle.nn.functional as F\n",
    "from paddle.nn import Linear\n",
    "import numpy as np\n",
    "import os\n",
    "import json\n",
    "import random\n",
    "\n",
    "print(paddle.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "from paddle.nn import Conv2D,MaxPool2D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(mode='train'):\n",
    "    with open(r'../desktop/new_mnist.json') as f:\n",
    "        data=json.load(f)\n",
    "    \n",
    "    train_set,val_set,eval_set=data\n",
    "    if mode=='train':\n",
    "        imgs,labels=train_set[0],train_set[1]\n",
    "    elif mode=='valid':\n",
    "        imgs,labels=val_set[0],val_set[1]\n",
    "    elif mode=='eval':\n",
    "        imgs,labels=eval_set[0],eval_set[1]\n",
    "    else:\n",
    "        raise Exception(\"mode can only be one of['train','valid','eval']\")\n",
    "    print(\"训练集数量：\",len(imgs))\n",
    "    \n",
    "    imgs_length =len(imgs)\n",
    "    \n",
    "    index_list =list(range(imgs_length))\n",
    "    BATCHSIZE=100\n",
    "    \n",
    "    def data_generator():\n",
    "        if mode == 'train':\n",
    "            random.shuffle(index_list)\n",
    "        imgs_list=[]\n",
    "        labels_list=[]\n",
    "        for i in index_list:\n",
    "            \n",
    "            img = np.reshape(imgs[i],[1,28,28]).astype('float32')\n",
    "            \n",
    "            label = np.reshape(labels[i],[1]).astype('int64')\n",
    "            imgs_list.append(img)\n",
    "            labels_list.append(label)\n",
    "            if len(imgs_list) ==BATCHSIZE:\n",
    "                yield np.array(imgs_list),np.array(labels_list)\n",
    "                imgs_list =[]\n",
    "                labels_list =[]\n",
    "                    \n",
    "            if len(imgs_list) >0:\n",
    "                yield np.array(imgs_list), np.array(labels_list)\n",
    "    return data_generator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LeNetModel(paddle.nn.Layer):\n",
    "    def __init__(self):\n",
    "        super(LeNetModel,self).__init__()\n",
    "        \n",
    "        self.conv1 = paddle.nn.Conv2D(in_channels=1,out_channels=6, kernel_size=5,stride=1)\n",
    "        self.pool1 = paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n",
    "        self.conv2 = paddle.nn.Conv2D(in_channels=6,out_channels=16,kernel_size=5,stride=1)\n",
    "        self.pool2 = paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n",
    "        self.fc1=paddle.nn.Linear(256,120)\n",
    "        self.fc2=paddle.nn.Linear(120,84)\n",
    "        self.fc3=paddle.nn.Linear(84,10)\n",
    "        \n",
    "    def forward(self,x):\n",
    "        x=self.conv1(x)\n",
    "        x=F.relu(x)\n",
    "        x=self.pool1(x)\n",
    "        x=self.conv2(x)\n",
    "        x=F.relu(x)\n",
    "        x=self.pool2(x)\n",
    "        x=paddle.flatten(x,start_axis=1,stop_axis=-1)\n",
    "        x=self.fc1(x)\n",
    "        x=F.relu(x)\n",
    "        x=self.fc2(x)\n",
    "        x =F.relu(x)\n",
    "        x=self.fc3(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model):\n",
    "    model.train()\n",
    "    opt = paddle.optimizer.SGD(learning_rate=0.01,parameters=model.parameters())\n",
    "    EPOCH_NUM =5\n",
    "\n",
    "    for epoch_id in range(EPOCH_NUM):\n",
    "        for batch_id,data in enumerate(train_loader()):\n",
    "            images,labels = data\n",
    "            images = paddle.to_tensor(images)\n",
    "            labels = paddle.to_tensor(labels)\n",
    "        \n",
    "            predicts = model(images)\n",
    "        \n",
    "            loss = F.softmax_with_cross_entropy(predicts,labels)\n",
    "            avg_loss = paddle.mean(loss)\n",
    "        \n",
    "            if batch_id % 200 ==0:\n",
    "                print( \"epoch: {}, batch: {}, loss is; {}\" .format(epoch_id, batch_id, avg_loss.numpy()))\n",
    "        \n",
    "            avg_loss.backward()\n",
    "            opt.step()\n",
    "            opt.clear_grad()\n",
    "        \n",
    "        model.eval()\n",
    "        accuracies =[]\n",
    "        losses =[]\n",
    "        for batch_id, data in enumerate(valid_loader()):\n",
    "            images, labels = data\n",
    "            images = paddle.to_tensor(images)\n",
    "            labels = paddle.to_tensor(labels)\n",
    "            logits = model(images)\n",
    "            pred = F.softmax(logits)\n",
    "            loss = F.softmax_with_cross_entropy(logits, labels)\n",
    "            acc = paddle.metric.accuracy(pred,labels)\n",
    "            accuracies.append(acc.numpy())\n",
    "            \n",
    "            losses.append(loss.numpy())\n",
    "            print(\"[validation] accuracy/loss: {}/{}\" .format(np.mean(accuracies),np.mean(losses)))\n",
    "        model.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集数量： 50000\n",
      "训练集数量： 10000\n",
      "epoch: 0, batch: 0, loss is; [3.4937344]\n",
      "epoch: 0, batch: 200, loss is; [1.0589043]\n",
      "epoch: 0, batch: 400, loss is; [2.50412]\n",
      "epoch: 0, batch: 600, loss is; [0.00285777]\n",
      "epoch: 0, batch: 800, loss is; [0.61777204]\n",
      "epoch: 0, batch: 1000, loss is; [5.3268867]\n",
      "epoch: 0, batch: 1200, loss is; [2.7428348]\n",
      "epoch: 0, batch: 1400, loss is; [0.01146209]\n",
      "epoch: 0, batch: 1600, loss is; [1.6689314e-06]\n",
      "epoch: 0, batch: 1800, loss is; [0.0483994]\n",
      "epoch: 0, batch: 2000, loss is; [0.00014306]\n",
      "epoch: 0, batch: 2200, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 2400, loss is; [2.1693516]\n",
      "epoch: 0, batch: 2600, loss is; [0.02033796]\n",
      "epoch: 0, batch: 2800, loss is; [0.00211731]\n",
      "epoch: 0, batch: 3000, loss is; [7.2158113]\n",
      "epoch: 0, batch: 3200, loss is; [8.893409e-05]\n",
      "epoch: 0, batch: 3400, loss is; [0.05089259]\n",
      "epoch: 0, batch: 3600, loss is; [0.0001253]\n",
      "epoch: 0, batch: 3800, loss is; [8.130404e-05]\n",
      "epoch: 0, batch: 4000, loss is; [1.9073505e-06]\n",
      "epoch: 0, batch: 4200, loss is; [0.]\n",
      "epoch: 0, batch: 4400, loss is; [0.000192]\n",
      "epoch: 0, batch: 4600, loss is; [7.844279e-05]\n",
      "epoch: 0, batch: 4800, loss is; [0.00066845]\n",
      "epoch: 0, batch: 5000, loss is; [7.15256e-07]\n",
      "epoch: 0, batch: 5200, loss is; [0.]\n",
      "epoch: 0, batch: 5400, loss is; [6.807082e-05]\n",
      "epoch: 0, batch: 5600, loss is; [0.00025067]\n",
      "epoch: 0, batch: 5800, loss is; [0.00011874]\n",
      "epoch: 0, batch: 6000, loss is; [2.384186e-07]\n",
      "epoch: 0, batch: 6200, loss is; [0.04955562]\n",
      "epoch: 0, batch: 6400, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 6600, loss is; [0.00210662]\n",
      "epoch: 0, batch: 6800, loss is; [1.4305125e-06]\n",
      "epoch: 0, batch: 7000, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 7200, loss is; [0.]\n",
      "epoch: 0, batch: 7400, loss is; [0.]\n",
      "epoch: 0, batch: 7600, loss is; [0.]\n",
      "epoch: 0, batch: 7800, loss is; [0.11588535]\n",
      "epoch: 0, batch: 8000, loss is; [7.748634e-06]\n",
      "epoch: 0, batch: 8200, loss is; [1.3113031e-06]\n",
      "epoch: 0, batch: 8400, loss is; [0.03402756]\n",
      "epoch: 0, batch: 8600, loss is; [7.2005e-05]\n",
      "epoch: 0, batch: 8800, loss is; [4.0889623e-05]\n",
      "epoch: 0, batch: 9000, loss is; [2.7418175e-06]\n",
      "epoch: 0, batch: 9200, loss is; [10.707486]\n",
      "epoch: 0, batch: 9400, loss is; [0.00155528]\n",
      "epoch: 0, batch: 9600, loss is; [0.]\n",
      "epoch: 0, batch: 9800, loss is; [0.00034648]\n",
      "epoch: 0, batch: 10000, loss is; [0.00059455]\n",
      "epoch: 0, batch: 10200, loss is; [1.443475]\n",
      "epoch: 0, batch: 10400, loss is; [0.00123517]\n",
      "epoch: 0, batch: 10600, loss is; [0.]\n",
      "epoch: 0, batch: 10800, loss is; [9.655999e-06]\n",
      "epoch: 0, batch: 11000, loss is; [0.]\n",
      "epoch: 0, batch: 11200, loss is; [6.7949527e-06]\n",
      "epoch: 0, batch: 11400, loss is; [1.2755475e-05]\n",
      "epoch: 0, batch: 11600, loss is; [0.10314736]\n",
      "epoch: 0, batch: 11800, loss is; [0.01122764]\n",
      "epoch: 0, batch: 12000, loss is; [6.4373226e-06]\n",
      "epoch: 0, batch: 12200, loss is; [9.447928]\n",
      "epoch: 0, batch: 12400, loss is; [0.00028334]\n",
      "epoch: 0, batch: 12600, loss is; [1.0728842e-06]\n",
      "epoch: 0, batch: 12800, loss is; [7.271793e-06]\n",
      "epoch: 0, batch: 13000, loss is; [2.384186e-07]\n",
      "epoch: 0, batch: 13200, loss is; [5.591072e-05]\n",
      "epoch: 0, batch: 13400, loss is; [0.00023398]\n",
      "epoch: 0, batch: 13600, loss is; [0.00878149]\n",
      "epoch: 0, batch: 13800, loss is; [0.]\n",
      "epoch: 0, batch: 14000, loss is; [5.960466e-07]\n",
      "epoch: 0, batch: 14200, loss is; [0.05315917]\n",
      "epoch: 0, batch: 14400, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 14600, loss is; [2.6226078e-06]\n",
      "epoch: 0, batch: 14800, loss is; [0.00162364]\n",
      "epoch: 0, batch: 15000, loss is; [3.5762793e-07]\n",
      "epoch: 0, batch: 15200, loss is; [0.00132613]\n",
      "epoch: 0, batch: 15400, loss is; [0.]\n",
      "epoch: 0, batch: 15600, loss is; [0.]\n",
      "epoch: 0, batch: 15800, loss is; [0.00036437]\n",
      "epoch: 0, batch: 16000, loss is; [0.00011016]\n",
      "epoch: 0, batch: 16200, loss is; [0.04979697]\n",
      "epoch: 0, batch: 16400, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 16600, loss is; [0.00021478]\n",
      "epoch: 0, batch: 16800, loss is; [9.628729]\n",
      "epoch: 0, batch: 17000, loss is; [0.00025937]\n",
      "epoch: 0, batch: 17200, loss is; [2.0981055e-05]\n",
      "epoch: 0, batch: 17400, loss is; [0.00132792]\n",
      "epoch: 0, batch: 17600, loss is; [0.13896374]\n",
      "epoch: 0, batch: 17800, loss is; [0.15775852]\n",
      "epoch: 0, batch: 18000, loss is; [0.0003571]\n",
      "epoch: 0, batch: 18200, loss is; [0.01216612]\n",
      "epoch: 0, batch: 18400, loss is; [0.13122976]\n",
      "epoch: 0, batch: 18600, loss is; [4.768373e-07]\n",
      "epoch: 0, batch: 18800, loss is; [0.]\n",
      "epoch: 0, batch: 19000, loss is; [2.384186e-07]\n",
      "epoch: 0, batch: 19200, loss is; [0.00097]\n",
      "epoch: 0, batch: 19400, loss is; [1.0728842e-06]\n",
      "epoch: 0, batch: 19600, loss is; [1.1444157e-05]\n",
      "epoch: 0, batch: 19800, loss is; [0.00352239]\n",
      "epoch: 0, batch: 20000, loss is; [0.]\n",
      "epoch: 0, batch: 20200, loss is; [3.2187025e-05]\n",
      "epoch: 0, batch: 20400, loss is; [0.00081483]\n",
      "epoch: 0, batch: 20600, loss is; [7.450858e-05]\n",
      "epoch: 0, batch: 20800, loss is; [0.]\n",
      "epoch: 0, batch: 21000, loss is; [0.]\n",
      "epoch: 0, batch: 21200, loss is; [0.]\n",
      "epoch: 0, batch: 21400, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 21600, loss is; [0.00010431]\n",
      "epoch: 0, batch: 21800, loss is; [1.1324947e-05]\n",
      "epoch: 0, batch: 22000, loss is; [4.768373e-07]\n",
      "epoch: 0, batch: 22200, loss is; [0.]\n",
      "epoch: 0, batch: 22400, loss is; [0.]\n",
      "epoch: 0, batch: 22600, loss is; [0.]\n",
      "epoch: 0, batch: 22800, loss is; [2.384186e-07]\n",
      "epoch: 0, batch: 23000, loss is; [0.03369763]\n",
      "epoch: 0, batch: 23200, loss is; [4.768373e-07]\n",
      "epoch: 0, batch: 23400, loss is; [0.00016512]\n",
      "epoch: 0, batch: 23600, loss is; [0.00226719]\n",
      "epoch: 0, batch: 23800, loss is; [2.9802368e-06]\n",
      "epoch: 0, batch: 24000, loss is; [0.00033647]\n",
      "epoch: 0, batch: 24200, loss is; [8.8215265e-06]\n",
      "epoch: 0, batch: 24400, loss is; [4.768373e-07]\n",
      "epoch: 0, batch: 24600, loss is; [12.5701685]\n",
      "epoch: 0, batch: 24800, loss is; [4.0651194e-05]\n",
      "epoch: 0, batch: 25000, loss is; [0.00014521]\n",
      "epoch: 0, batch: 25200, loss is; [0.]\n",
      "epoch: 0, batch: 25400, loss is; [2.3961355e-05]\n",
      "epoch: 0, batch: 25600, loss is; [0.00013877]\n",
      "epoch: 0, batch: 25800, loss is; [9.537198e-05]\n",
      "epoch: 0, batch: 26000, loss is; [1.9073505e-06]\n",
      "epoch: 0, batch: 26200, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 26400, loss is; [0.00010324]\n",
      "epoch: 0, batch: 26600, loss is; [0.00023135]\n",
      "epoch: 0, batch: 26800, loss is; [0.0010518]\n",
      "epoch: 0, batch: 27000, loss is; [0.1368058]\n",
      "epoch: 0, batch: 27200, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 27400, loss is; [6.3181124e-06]\n",
      "epoch: 0, batch: 27600, loss is; [0.03160707]\n",
      "epoch: 0, batch: 27800, loss is; [0.00428036]\n",
      "epoch: 0, batch: 28000, loss is; [9.536748e-07]\n",
      "epoch: 0, batch: 28200, loss is; [3.5762793e-07]\n",
      "epoch: 0, batch: 28400, loss is; [0.03473434]\n",
      "epoch: 0, batch: 28600, loss is; [0.00051404]\n",
      "epoch: 0, batch: 28800, loss is; [0.01733999]\n",
      "epoch: 0, batch: 29000, loss is; [6.9739865e-05]\n",
      "epoch: 0, batch: 29200, loss is; [0.]\n",
      "epoch: 0, batch: 29400, loss is; [0.]\n",
      "epoch: 0, batch: 29600, loss is; [1.1080115]\n",
      "epoch: 0, batch: 29800, loss is; [7.987341e-05]\n",
      "epoch: 0, batch: 30000, loss is; [2.2173173e-05]\n",
      "epoch: 0, batch: 30200, loss is; [5.960466e-07]\n",
      "epoch: 0, batch: 30400, loss is; [6.103702e-05]\n",
      "epoch: 0, batch: 30600, loss is; [3.5762793e-07]\n",
      "epoch: 0, batch: 30800, loss is; [0.]\n",
      "epoch: 0, batch: 31000, loss is; [0.]\n",
      "epoch: 0, batch: 31200, loss is; [2.7418175e-06]\n",
      "epoch: 0, batch: 31400, loss is; [2.384186e-07]\n",
      "epoch: 0, batch: 31600, loss is; [0.0033621]\n",
      "epoch: 0, batch: 31800, loss is; [0.00080934]\n",
      "epoch: 0, batch: 32000, loss is; [9.536748e-07]\n",
      "epoch: 0, batch: 32200, loss is; [0.]\n",
      "epoch: 0, batch: 32400, loss is; [2.2888446e-05]\n",
      "epoch: 0, batch: 32600, loss is; [3.5762793e-07]\n",
      "epoch: 0, batch: 32800, loss is; [8.225475e-06]\n",
      "epoch: 0, batch: 33000, loss is; [0.]\n",
      "epoch: 0, batch: 33200, loss is; [1.2423445]\n",
      "epoch: 0, batch: 33400, loss is; [2.384186e-07]\n",
      "epoch: 0, batch: 33600, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 33800, loss is; [0.00047755]\n",
      "epoch: 0, batch: 34000, loss is; [8.940737e-06]\n",
      "epoch: 0, batch: 34200, loss is; [1.4305125e-06]\n",
      "epoch: 0, batch: 34400, loss is; [0.00549992]\n",
      "epoch: 0, batch: 34600, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 34800, loss is; [3.5762793e-07]\n",
      "epoch: 0, batch: 35000, loss is; [0.]\n",
      "epoch: 0, batch: 35200, loss is; [0.]\n",
      "epoch: 0, batch: 35400, loss is; [0.]\n",
      "epoch: 0, batch: 35600, loss is; [4.4107533e-06]\n",
      "epoch: 0, batch: 35800, loss is; [1.0728842e-06]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch: 0, batch: 36000, loss is; [9.739873e-05]\n",
      "epoch: 0, batch: 36200, loss is; [0.]\n",
      "epoch: 0, batch: 36400, loss is; [2.264979e-06]\n",
      "epoch: 0, batch: 36600, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 36800, loss is; [0.]\n",
      "epoch: 0, batch: 37000, loss is; [0.]\n",
      "epoch: 0, batch: 37200, loss is; [0.]\n",
      "epoch: 0, batch: 37400, loss is; [0.00020071]\n",
      "epoch: 0, batch: 37600, loss is; [0.09903356]\n",
      "epoch: 0, batch: 37800, loss is; [0.00047075]\n",
      "epoch: 0, batch: 38000, loss is; [0.00124866]\n",
      "epoch: 0, batch: 38200, loss is; [0.00021764]\n",
      "epoch: 0, batch: 38400, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 38600, loss is; [0.]\n",
      "epoch: 0, batch: 38800, loss is; [1.9312092e-05]\n",
      "epoch: 0, batch: 39000, loss is; [0.]\n",
      "epoch: 0, batch: 39200, loss is; [0.00024041]\n",
      "epoch: 0, batch: 39400, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 39600, loss is; [0.]\n",
      "epoch: 0, batch: 39800, loss is; [0.00037236]\n",
      "epoch: 0, batch: 40000, loss is; [0.]\n",
      "epoch: 0, batch: 40200, loss is; [0.]\n",
      "epoch: 0, batch: 40400, loss is; [0.]\n",
      "epoch: 0, batch: 40600, loss is; [8.3446537e-07]\n",
      "epoch: 0, batch: 40800, loss is; [3.8862985e-05]\n",
      "epoch: 0, batch: 41000, loss is; [5.6028525e-06]\n",
      "epoch: 0, batch: 41200, loss is; [1.5616539e-05]\n",
      "epoch: 0, batch: 41400, loss is; [2.5749538e-05]\n",
      "epoch: 0, batch: 41600, loss is; [3.5762793e-07]\n",
      "epoch: 0, batch: 41800, loss is; [0.]\n",
      "epoch: 0, batch: 42000, loss is; [3.8405478]\n",
      "epoch: 0, batch: 42200, loss is; [9.77521e-06]\n",
      "epoch: 0, batch: 42400, loss is; [7.844279e-05]\n",
      "epoch: 0, batch: 42600, loss is; [0.]\n",
      "epoch: 0, batch: 42800, loss is; [1.6689314e-06]\n",
      "epoch: 0, batch: 43000, loss is; [0.]\n",
      "epoch: 0, batch: 43200, loss is; [1.7166285e-05]\n",
      "epoch: 0, batch: 43400, loss is; [7.856201e-05]\n",
      "epoch: 0, batch: 43600, loss is; [7.15256e-07]\n",
      "epoch: 0, batch: 43800, loss is; [0.]\n",
      "epoch: 0, batch: 44000, loss is; [0.]\n",
      "epoch: 0, batch: 44200, loss is; [0.]\n",
      "epoch: 0, batch: 44400, loss is; [0.00033313]\n",
      "epoch: 0, batch: 44600, loss is; [0.]\n",
      "epoch: 0, batch: 44800, loss is; [0.00198621]\n",
      "epoch: 0, batch: 45000, loss is; [9.536748e-07]\n",
      "epoch: 0, batch: 45200, loss is; [0.]\n",
      "epoch: 0, batch: 45400, loss is; [1.9323426]\n",
      "epoch: 0, batch: 45600, loss is; [1.192093e-07]\n",
      "epoch: 0, batch: 45800, loss is; [3.0518044e-05]\n",
      "epoch: 0, batch: 46000, loss is; [0.0003094]\n",
      "epoch: 0, batch: 46200, loss is; [0.]\n",
      "epoch: 0, batch: 46400, loss is; [4.768373e-07]\n",
      "epoch: 0, batch: 46600, loss is; [3.9339143e-06]\n",
      "epoch: 0, batch: 46800, loss is; [0.]\n",
      "epoch: 0, batch: 47000, loss is; [0.]\n",
      "epoch: 0, batch: 47200, loss is; [0.00839535]\n",
      "epoch: 0, batch: 47400, loss is; [6.914163e-06]\n",
      "epoch: 0, batch: 47600, loss is; [2.1457695e-06]\n",
      "epoch: 0, batch: 47800, loss is; [0.]\n",
      "epoch: 0, batch: 48000, loss is; [7.15256e-07]\n",
      "epoch: 0, batch: 48200, loss is; [0.00491592]\n",
      "epoch: 0, batch: 48400, loss is; [0.]\n",
      "epoch: 0, batch: 48600, loss is; [0.00024685]\n",
      "epoch: 0, batch: 48800, loss is; [0.]\n",
      "epoch: 0, batch: 49000, loss is; [0.]\n",
      "epoch: 0, batch: 49200, loss is; [0.]\n",
      "epoch: 0, batch: 49400, loss is; [0.00017913]\n",
      "epoch: 0, batch: 49600, loss is; [1.1743785]\n",
      "epoch: 0, batch: 49800, loss is; [0.]\n",
      "[validation] accuracy/loss: 1.0/0.0\n",
      "[validation] accuracy/loss: 1.0/[[-0.0000000e+00]\n",
      " [ 1.1920944e-06]]\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "operands could not be broadcast together with shapes (2,1) (3,1) ",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mValueError\u001b[0m                                Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-12-22eca3899fb2>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0mmodel\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mLeNetModel\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 6\u001b[1;33m \u001b[0mtrain\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;32m<ipython-input-11-1584d464bd1f>\u001b[0m in \u001b[0;36mtrain\u001b[1;34m(model)\u001b[0m\n\u001b[0;32m     36\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     37\u001b[0m             \u001b[0mlosses\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mappend\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mnumpy\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 38\u001b[1;33m             \u001b[0mprint\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m\"[validation] accuracy/loss: {}/{}\"\u001b[0m \u001b[1;33m.\u001b[0m\u001b[0mformat\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0maccuracies\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mlosses\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     39\u001b[0m         \u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtrain\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\numpy\\core\\fromnumeric.py\u001b[0m in \u001b[0;36mmean\u001b[1;34m(a, axis, dtype, out, keepdims)\u001b[0m\n\u001b[0;32m   2918\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2919\u001b[0m     return _methods._mean(a, axis=axis, dtype=dtype,\n\u001b[1;32m-> 2920\u001b[1;33m                           out=out, **kwargs)\n\u001b[0m\u001b[0;32m   2921\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m   2922\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32m~\\Anaconda3\\lib\\site-packages\\numpy\\core\\_methods.py\u001b[0m in \u001b[0;36m_mean\u001b[1;34m(a, axis, dtype, out, keepdims)\u001b[0m\n\u001b[0;32m     73\u001b[0m             \u001b[0mis_float16_result\u001b[0m \u001b[1;33m=\u001b[0m \u001b[1;32mTrue\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     74\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 75\u001b[1;33m     \u001b[0mret\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mumr_sum\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0marr\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0maxis\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdtype\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mout\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mkeepdims\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     76\u001b[0m     \u001b[1;32mif\u001b[0m \u001b[0misinstance\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mret\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mmu\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mndarray\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     77\u001b[0m         ret = um.true_divide(\n",
      "\u001b[1;31mValueError\u001b[0m: operands could not be broadcast together with shapes (2,1) (3,1) "
     ]
    }
   ],
   "source": [
     "# Build the data pipelines, then train a freshly initialized LeNet.\n",
     "train_loader=load_data('train')\n",
     "\n",
     "valid_loader=load_data('valid')\n",
     "\n",
     "model=LeNetModel()\n",
     "train(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Persist the trained weights; the inference cell below reloads this file.\n",
     "paddle.save(model.state_dict(),'mnist-cnn.pdparams')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "import numpy as np\n",
    "\n",
    "im= Image.open('../desktop/0.jpg').convert('L')\n",
    "im = im.resize((28,28),Image.ANTIALIAS)\n",
    "img = np.array(im),reshape(1,1,28,28).astype('float32')\n",
    "img =1.0-img/255."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "import matplotlib.pyplot as plt\n",
     "\n",
     "# Sanity-check the preprocessed digit before running inference on it.\n",
     "plt.figure(figsize=(2,2))\n",
     "plt.imshow(im,cmap=plt.cm.binary)\n",
     "plt.show() "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model=LeNetModel()\n",
    "\n",
    "params_file_path = 'mnist-cnn.pdparam'\n",
    "param_dict = paddle.load(params_file_path)\n",
    "model.load_dict(param_dict)\n",
    "\n",
    "model.eval()\n",
    "tensor_img =img\n",
    "results = model(paddle.to_tensor(tensor_img))\n",
    "lab = np.argsort(results.numpy())\n",
    "print('本次预测的数字是:',lab[0][-1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(mode='train'):\n",
    "    with open(r'../desktop/new_mnist.json') as f:\n",
    "        data=json.load(f)\n",
    "    \n",
    "    train_set,val_set,eval_set=data\n",
    "    if mode=='train':\n",
    "        imgs,labels=train_set[0],train_set[1]\n",
    "    elif mode=='valid':\n",
    "        imgs,labels=val_set[0],val_set[1]\n",
    "    elif mode=='eval':\n",
    "        imgs,labels=eval_set[0],eval_set[1]\n",
    "    else:\n",
    "        raise Exception(\"mode can only be one of['train','valid','eval']\")\n",
    "    print(\"训练数据集数量：\",len(imgs))\n",
    "    \n",
    "    imgs_length =len(imgs)\n",
    "    \n",
    "    index_list =list(range(imgs_length))\n",
    "    BATCHSIZE=100\n",
    "    \n",
    "    def data_generator():\n",
    "        if mode == 'train':\n",
    "            random.shuffle(index_list)\n",
    "            imgs_list=[]\n",
    "            labels_list=[]\n",
    "            for i in index_list:\n",
    "                img = np.array(imgs[i]).astype('float32')\n",
    "                label = np.reshape(labels[i], [1]).astype('int64')\n",
    "                imgs_list.append(img)\n",
    "                labels_list.append(label)\n",
    "                if len(imgs_list) ==BATCHSIZE:\n",
    "                    yield np.array(imgs_list),np.array(labels_list)\n",
    "                    imgs_list =[]\n",
    "                    labels_list =[]\n",
    "                    \n",
    "                if len(imgs_list) >0:\n",
    "                    yield np.array(imgs_list), np.array(labels_list)\n",
    "    return data_generator"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
