{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 32,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.5.2\n"
     ]
    }
   ],
   "source": [
    "import paddle\n",
    "import paddle.nn.functional as F\n",
    "from paddle.nn import Linear\n",
    "import numpy as np\n",
    "import os\n",
    "import json\n",
    "import random\n",
    "print(paddle.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [],
   "source": [
    "from paddle.nn import Conv2D,MaxPool2D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(mode='train'):\n",
    "    with open('mnist.json')as f:\n",
    "        data=json.load(f)\n",
    "    train_set,val_set,eval_set=data\n",
    "    if mode=='train':\n",
    "        imgs,labels=train_set[0],train_set[1]\n",
    "    elif mode=='valid':\n",
    "        imgs,labels=val_set[0],val_set[1]\n",
    "    elif model=='eval':\n",
    "        imgs,labels=eval_set[0],val_set[1]\n",
    "    else:\n",
    "         raise Exception(\"mode can only be one of['train','valid','eval']\")\n",
    "    print(\"训练集数量:\",len(imgs))\n",
    "    imgs_length=len(imgs)\n",
    "    index_list=list(range(imgs_length))\n",
    "    BATCHSIZE=100\n",
    "    def data_generator():\n",
    "        if mode=='train':\n",
    "            random.shuffle(index_list)\n",
    "            imgs_list=[]\n",
    "            labels_list=[]\n",
    "            for i in index_list:\n",
    "                img=np.array(imgs[i]).astype('float32')\n",
    "                img=np.reshape(imgs[i],[1,28,28]).astype('float32')\n",
    "                label=np.reshape(labels[i],[1]).astype('int64')\n",
    "                imgs_list.append(img)\n",
    "                labels_list.append(label)\n",
    "                if len(imgs_list)==BATCHSIZE:\n",
    "                    yield np.array(imgs_list),np.array(labels_list)\n",
    "                    imgs_list=[]\n",
    "                    labels_list=[]\n",
    "                if len(imgs_list)>0:\n",
    "                    yield np.array(imgs_list),np.array(labels_list)\n",
    "    return data_generator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LeNetModel(paddle.nn.Layer):\n",
    "    def __init__(self):\n",
    "        super(LeNetModel,self).__init__()\n",
    "        self.conv1=paddle.nn.Conv2D(in_channels=1,out_channels=6,kernel_size=5,stride=1)\n",
    "        self.pool1=paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n",
    "        self.conv2=paddle.nn.Conv2D(in_channels=6,out_channels=16,kernel_size=5,stride=1)\n",
    "        self.pool2=paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n",
    "        self.fc1=paddle.nn.Linear(256,120)\n",
    "        self.fc2=paddle.nn.Linear(120,84)\n",
    "        self.fc3=paddle.nn.Linear(84,10)\n",
    "    def forward(self,x):\n",
    "        x=self.conv1(x)\n",
    "        x=F.relu(x)\n",
    "        x=self.pool1(x)\n",
    "        x=self.conv2(x)\n",
    "        x=F.relu(x)\n",
    "        x=self.pool2(x)\n",
    "        x=paddle.flatten(x,start_axis=1,stop_axis=-1)\n",
    "        x=self.fc1(x)\n",
    "        x=F.relu(x)\n",
    "        x=self.fc2(x)\n",
    "        x=F.relu(x)\n",
    "        x=self.fc3(x)\n",
    "        return x\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 38,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model):\n",
    "    model.train()\n",
    "    opt=paddle.optimizer.SGD(learning_rate=0.01,parameters=model.parameters())\n",
    "    EPOCH_NUM=5\n",
    "    for epoch_id in range(EPOCH_NUM):\n",
    "        for batch_id,data in enumerate(train_loader()):\n",
    "            images,labels=data\n",
    "            images=paddle.to_tensor(images)\n",
    "            labels=paddle.to_tensor(labels)\n",
    "            predicts=model(images)\n",
    "            loss=F.cross_entropy(predicts,labels)\n",
    "            loss=F.softmax_with_cross_entropy(predicts,labels)\n",
    "            avg_loss=paddle.mean(loss)\n",
    "            if batch_id%200==0:\n",
    "                print(\"epoch:{},batch:{},loss is:{}\".format(epoch_id,batch_id,avg_loss.numpy()))\n",
    "            avg_loss.backward()\n",
    "            opt.step()\n",
    "            opt.clear_grad()\n",
    "            model.eval()\n",
    "            accuracies=[]\n",
    "            losses=[]\n",
    "            for batch_id,data in enumerate(valid_loader()):\n",
    "                images,labels=data\n",
    "                images=paddle.to_tensor(images)\n",
    "                labels=paddle.to_tensor(labels)\n",
    "                logits=model(images)\n",
    "                pred=F.softmax(logits)\n",
    "                loss=F.softmax_with_cross_entropy(logits,labels)\n",
    "                acc=paddle.metric.accuracy(pred,labbels)\n",
    "                accuracies.append(acc.numpy())\n",
    "                losses.append(loss.numpy())\n",
    "                print(\"[validation]accuracy/loss:{}/{}\".format(np.mean(accuracies).np.mean(losses)))\n",
    "            model.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "训练集数量: 50000\n",
      "训练集数量: 10000\n",
      "epoch:0,batch:0,loss is:[6.4364133]\n",
      "epoch:0,batch:200,loss is:[0.2096441]\n",
      "epoch:0,batch:400,loss is:[0.08454891]\n",
      "epoch:0,batch:600,loss is:[0.04226108]\n",
      "epoch:0,batch:800,loss is:[0.00050987]\n",
      "epoch:0,batch:1000,loss is:[0.00033026]\n",
      "epoch:0,batch:1200,loss is:[0.00062455]\n",
      "epoch:0,batch:1400,loss is:[0.00670218]\n",
      "epoch:0,batch:1600,loss is:[0.01019556]\n",
      "epoch:0,batch:1800,loss is:[0.06817015]\n",
      "epoch:0,batch:2000,loss is:[0.0377546]\n",
      "epoch:0,batch:2200,loss is:[5.9725637e-05]\n",
      "epoch:0,batch:2400,loss is:[7.0333726e-06]\n",
      "epoch:0,batch:2600,loss is:[0.]\n",
      "epoch:0,batch:2800,loss is:[0.00035197]\n",
      "epoch:0,batch:3000,loss is:[0.0217329]\n",
      "epoch:0,batch:3200,loss is:[8.893409e-05]\n",
      "epoch:0,batch:3400,loss is:[5.149974e-05]\n",
      "epoch:0,batch:3600,loss is:[5.960466e-07]\n",
      "epoch:0,batch:3800,loss is:[1.7881409e-06]\n",
      "epoch:0,batch:4000,loss is:[0.]\n",
      "epoch:0,batch:4200,loss is:[1.9073505e-06]\n",
      "epoch:0,batch:4400,loss is:[0.05297878]\n",
      "epoch:0,batch:4600,loss is:[0.00426096]\n",
      "epoch:0,batch:4800,loss is:[7.6296856e-05]\n",
      "epoch:0,batch:5000,loss is:[0.00012601]\n",
      "epoch:0,batch:5200,loss is:[0.00218559]\n",
      "epoch:0,batch:5400,loss is:[2.9802766e-05]\n",
      "epoch:0,batch:5600,loss is:[0.00062837]\n",
      "epoch:0,batch:5800,loss is:[2.264979e-06]\n",
      "epoch:0,batch:6000,loss is:[9.77521e-06]\n",
      "epoch:0,batch:6200,loss is:[2.372293e-05]\n",
      "epoch:0,batch:6400,loss is:[4.2915435e-06]\n",
      "epoch:0,batch:6600,loss is:[2.8014576e-05]\n",
      "epoch:0,batch:6800,loss is:[0.]\n",
      "epoch:0,batch:7000,loss is:[3.5762793e-07]\n",
      "epoch:0,batch:7200,loss is:[0.00049532]\n",
      "epoch:0,batch:7400,loss is:[1.192093e-07]\n",
      "epoch:0,batch:7600,loss is:[0.02940099]\n",
      "epoch:0,batch:7800,loss is:[0.00034064]\n",
      "epoch:0,batch:8000,loss is:[6.771317e-05]\n",
      "epoch:0,batch:8200,loss is:[2.0384996e-05]\n",
      "epoch:0,batch:8400,loss is:[0.00101409]\n",
      "epoch:0,batch:8600,loss is:[1.192093e-07]\n",
      "epoch:0,batch:8800,loss is:[0.8214358]\n",
      "epoch:0,batch:9000,loss is:[4.4260006]\n",
      "epoch:0,batch:9200,loss is:[2.384186e-07]\n",
      "epoch:0,batch:9400,loss is:[0.]\n",
      "epoch:0,batch:9600,loss is:[0.00182557]\n",
      "epoch:0,batch:9800,loss is:[2.384186e-07]\n",
      "epoch:0,batch:10000,loss is:[0.9107899]\n",
      "epoch:0,batch:10200,loss is:[7.867844e-06]\n",
      "epoch:0,batch:10400,loss is:[0.2564274]\n",
      "epoch:0,batch:10600,loss is:[14.709698]\n",
      "epoch:0,batch:10800,loss is:[0.00024161]\n",
      "epoch:0,batch:11000,loss is:[20.17001]\n",
      "epoch:0,batch:11200,loss is:[0.01264402]\n",
      "epoch:0,batch:11400,loss is:[1.192093e-07]\n",
      "epoch:0,batch:11600,loss is:[1.0319488]\n",
      "epoch:0,batch:11800,loss is:[0.01196293]\n",
      "epoch:0,batch:12000,loss is:[2.9325915e-05]\n",
      "epoch:0,batch:12200,loss is:[0.0001458]\n",
      "epoch:0,batch:12400,loss is:[0.00014616]\n",
      "epoch:0,batch:12600,loss is:[0.]\n",
      "epoch:0,batch:12800,loss is:[1.192093e-07]\n",
      "epoch:0,batch:13000,loss is:[0.26273462]\n",
      "epoch:0,batch:13200,loss is:[0.00016738]\n",
      "epoch:0,batch:13400,loss is:[0.01570373]\n",
      "epoch:0,batch:13600,loss is:[0.]\n",
      "epoch:0,batch:13800,loss is:[0.00058024]\n",
      "epoch:0,batch:14000,loss is:[0.]\n",
      "epoch:0,batch:14200,loss is:[3.5762793e-07]\n",
      "epoch:0,batch:14400,loss is:[0.]\n",
      "epoch:0,batch:14600,loss is:[0.00045292]\n",
      "epoch:0,batch:14800,loss is:[2.777615e-05]\n",
      "epoch:0,batch:15000,loss is:[2.4080566e-05]\n",
      "epoch:0,batch:15200,loss is:[0.00979472]\n",
      "epoch:0,batch:15400,loss is:[1.192093e-07]\n",
      "epoch:0,batch:15600,loss is:[4.768373e-07]\n",
      "epoch:0,batch:15800,loss is:[2.384186e-07]\n",
      "epoch:0,batch:16000,loss is:[1.192093e-07]\n",
      "epoch:0,batch:16200,loss is:[0.05318393]\n",
      "epoch:0,batch:16400,loss is:[0.12783365]\n",
      "epoch:0,batch:16600,loss is:[0.00677683]\n",
      "epoch:0,batch:16800,loss is:[1.6791916]\n",
      "epoch:0,batch:17000,loss is:[0.00049424]\n",
      "epoch:0,batch:17200,loss is:[7.15256e-07]\n",
      "epoch:0,batch:17400,loss is:[2.384186e-07]\n",
      "epoch:0,batch:17600,loss is:[1.3113031e-06]\n",
      "epoch:0,batch:17800,loss is:[0.]\n",
      "epoch:0,batch:18000,loss is:[1.0609683e-05]\n",
      "epoch:0,batch:18200,loss is:[0.0001963]\n",
      "epoch:0,batch:18400,loss is:[0.00302157]\n",
      "epoch:0,batch:18600,loss is:[1.7047074e-05]\n",
      "epoch:0,batch:18800,loss is:[4.2916265e-05]\n",
      "epoch:0,batch:19000,loss is:[6.0796924e-06]\n",
      "epoch:0,batch:19200,loss is:[8.3446537e-07]\n",
      "epoch:0,batch:19400,loss is:[0.48432294]\n",
      "epoch:0,batch:19600,loss is:[4.172334e-06]\n",
      "epoch:0,batch:19800,loss is:[0.00174955]\n",
      "epoch:0,batch:20000,loss is:[0.00017203]\n",
      "epoch:0,batch:20200,loss is:[0.]\n",
      "epoch:0,batch:20400,loss is:[0.]\n",
      "epoch:0,batch:20600,loss is:[1.001363e-05]\n",
      "epoch:0,batch:20800,loss is:[4.7804067e-05]\n",
      "epoch:0,batch:21000,loss is:[2.193475e-05]\n",
      "epoch:0,batch:21200,loss is:[0.]\n",
      "epoch:0,batch:21400,loss is:[0.00126293]\n",
      "epoch:0,batch:21600,loss is:[3.9700239]\n",
      "epoch:0,batch:21800,loss is:[0.00072631]\n",
      "epoch:0,batch:22000,loss is:[0.00197564]\n",
      "epoch:0,batch:22200,loss is:[0.]\n",
      "epoch:0,batch:22400,loss is:[4.351526]\n",
      "epoch:0,batch:22600,loss is:[0.]\n",
      "epoch:0,batch:22800,loss is:[3.1661108]\n",
      "epoch:0,batch:23000,loss is:[0.00015248]\n",
      "epoch:0,batch:23200,loss is:[0.02518232]\n",
      "epoch:0,batch:23400,loss is:[0.0317132]\n",
      "epoch:0,batch:23600,loss is:[0.]\n",
      "epoch:0,batch:23800,loss is:[0.]\n",
      "epoch:0,batch:24000,loss is:[0.00016011]\n",
      "epoch:0,batch:24200,loss is:[1.2517054e-05]\n",
      "epoch:0,batch:24400,loss is:[6.4373226e-06]\n",
      "epoch:0,batch:24600,loss is:[8.8215265e-06]\n",
      "epoch:0,batch:24800,loss is:[4.3512337e-05]\n",
      "epoch:0,batch:25000,loss is:[6.532882e-05]\n",
      "epoch:0,batch:25200,loss is:[5.9779983]\n",
      "epoch:0,batch:25400,loss is:[4.768373e-07]\n",
      "epoch:0,batch:25600,loss is:[1.2539477]\n",
      "epoch:0,batch:25800,loss is:[0.00026164]\n",
      "epoch:0,batch:26000,loss is:[0.00924484]\n",
      "epoch:0,batch:26200,loss is:[0.17405573]\n",
      "epoch:0,batch:26400,loss is:[1.1920936e-06]\n",
      "epoch:0,batch:26600,loss is:[6.6757425e-06]\n"
     ]
    }
   ],
   "source": [
     "# build batch generators for the train/valid splits, then train LeNet;\n",
     "# train() reads the train_loader/valid_loader globals defined here\n",
     "train_loader=load_data('train')\n",
     "valid_loader=load_data('valid')\n",
     "model=LeNetModel()\n",
     "train(model)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'paddle' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-8-4d98e0c0ceaf>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[0mpaddle\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msave\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mmodel\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstalte_dict\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;34m'mnist-cnn.pdparams'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m: name 'paddle' is not defined"
     ]
    }
   ],
   "source": [
    "paddle.save(model.stalte_dict(),'mnist-cnn.pdparams')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "ename": "ImportError",
     "evalue": "cannot import name 'image' from 'PIL' (C:\\ProgramData\\Anaconda3\\lib\\site-packages\\PIL\\__init__.py)",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mImportError\u001b[0m                               Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-9-659067877313>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[1;32m----> 1\u001b[1;33m \u001b[1;32mfrom\u001b[0m \u001b[0mPIL\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mimage\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      2\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mnumpy\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mnp\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      3\u001b[0m \u001b[0mim\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mimage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mopen\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'data/0.jpg'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mconvert\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'L'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      4\u001b[0m \u001b[0mim\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mimresize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m28\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m28\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mimage\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mANTIALIAS\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      5\u001b[0m \u001b[0mimg\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mnp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0marray\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mim\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mreshape\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m28\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m28\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mastype\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;34m'float32'\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mImportError\u001b[0m: cannot import name 'image' from 'PIL' (C:\\ProgramData\\Anaconda3\\lib\\site-packages\\PIL\\__init__.py)"
     ]
    }
   ],
   "source": [
    "from PIL import image\n",
    "import numpy as np\n",
    "im=image.open('data/0.jpg').convert('L')\n",
    "im=imresize((28,28),image.ANTIALIAS)\n",
    "img=np.array(im).reshape(1,1,28,28).astype('float32')\n",
    "img=1.0-img/255."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "ename": "NameError",
     "evalue": "name 'im' is not defined",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mNameError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-10-2743afd8a8ed>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m()\u001b[0m\n\u001b[0;32m      1\u001b[0m \u001b[1;32mimport\u001b[0m \u001b[0mmatplotlib\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mpyplot\u001b[0m \u001b[1;32mas\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m      2\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mfigure\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mfigsize\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m,\u001b[0m\u001b[1;36m2\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m----> 3\u001b[1;33m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mimshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mim\u001b[0m\u001b[1;33m,\u001b[0m\u001b[0mcmap\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mcm\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbinary\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m      4\u001b[0m \u001b[0mplt\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mshow\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mNameError\u001b[0m: name 'im' is not defined"
     ]
    }
   ],
   "source": [
     "import matplotlib.pyplot as plt\n",
     "# display the preprocessed input image (grayscale) before prediction;\n",
     "# `im` comes from the PIL-loading cell above\n",
     "plt.figure(figsize=(2,2))\n",
     "plt.imshow(im,cmap=plt.cm.binary)\n",
     "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# reload the trained weights and predict the digit for `img`\n",
     "model=LeNetModel()\n",
     "params_file_path='mnist-cnn.pdparams'\n",
     "param_dict=paddle.load(params_file_path)\n",
     "model.load_dict(param_dict)\n",
     "model.eval()\n",
     "tensor_img=img\n",
     "results=model(paddle.to_tensor(tensor_img))\n",
     "# argsort is ascending, so the last index of row 0 is the\n",
     "# highest-scoring class, i.e. the predicted digit\n",
     "lab=np.argsort(results.numpy())\n",
     "print(\"本次预测的数字是:\",lab[0][-1])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
