{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2.5.2\n"
     ]
    }
   ],
   "source": [
    "import paddle\n",
    "import paddle.nn.functional as F\n",
    "from paddle.nn import Linear\n",
    "import numpy as np\n",
    "import os\n",
    "import json\n",
    "import random\n",
    "print(paddle.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "from paddle.nn import Conv2D,MaxPool2D"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(mode='train'):\n",
    "    with open('data/mnist.json')as f:\n",
    "        data=json.load(f)\n",
    "        \n",
    "    train_set,val_set,eval_set=data\n",
    "    if mode=='train':\n",
    "        imgs,labels=train_set[0],train_set[1]\n",
    "    elif mode==valid:\n",
    "         imgs,labels=val_set[0],val_set[1]\n",
    "    elif mode==eval:\n",
    "        imgs,labels=eval_set[0],eval_set[1]\n",
    "    else:\n",
    "        raise Exception(\"mode can only be one of['train','valid','eval']\")\n",
    "    print(\"训练数据集数量:\",len(imgs))\n",
    "    \n",
    "    imgs_length=len(imgs)\n",
    "    index_list=list(range(imgs_length))\n",
    "    BATCHSIZE=100\n",
    "    def data_generator():\n",
    "        if mode=='train':\n",
    "            random.shuffle(index_list)\n",
    "            imgs_list=[]\n",
    "            labels_list=[]\n",
    "            for i in index_list:\n",
    "                img=np.array(imgs[i]).astype('float32')\n",
    "                label=np.reshape(labels[i],[1]).astype('int64')\n",
    "                imgs_list.append(img)\n",
    "                labels_list.append(label)\n",
    "                if len(imgs_list)==BATCHSIZE:\n",
    "                    yield np.array(imgs_list),np.array(labels_list)\n",
    "                    imgs_list=[]\n",
    "                    labels_list=[]\n",
    "            if len(imgs_list)>0:\n",
    "                yield np.array(imgs_list),np.array(labels_list)\n",
    "            return data_generator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_data(mode='train'):\n",
    "    with open('data/mnist.json')as f:\n",
    "        data=json.load(f)\n",
    "    train_set,val_set,eval_set=data\n",
    "    if mode=='train':\n",
    "        imgs,labels=train_set[0],train_set[1]\n",
    "    elif mode==valid:\n",
    "        imgs,labels=val_set[0],val_set[1]\n",
    "    elif model=='eval':\n",
    "        imgs,labels=eval_set[0],val_set[1]\n",
    "    else:\n",
    "         raise Exception(\"mode can only be one of['train','valid','eval']\")\n",
    "    print(\"训练集数量:\",len(imgs))\n",
    "    imgs_length=len(imgs)\n",
    "    index_list=list(range(imgs_length))\n",
    "    BATCHSIZE=100\n",
    "    def data_generator():\n",
    "        if mode=='train':\n",
    "            random.shuffle(index_list)\n",
    "            imgs_list=[]\n",
    "            labels_list=[]\n",
    "            for i in index_list:\n",
    "                img=np.array(imgs[i]).astype('float32')\n",
    "                img=np.reshape(imgs[i],[1,28,28]).astype('float32')\n",
    "                label=np.reshape(labels[i],[1]).astype('int64')\n",
    "                imgs_list.append(img)\n",
    "                labels_list.append(label)\n",
    "                if len(imgs_list)==BATCHSIZE:\n",
    "                    yield np.array(imgs_list),np.array(labels_list)\n",
    "                    imgs_list=[]\n",
    "                    labels_list=[]\n",
    "            if len(imgs_list)>0:\n",
    "                yield np.array(imgs_list),np.array(labels_list)\n",
    "            return data_generator"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "class LeNetModel(paddle.nn.Layer):\n",
    "    def __init__(self):\n",
    "        super(LeNetModel,self).__init__()\n",
    "        self.conv1=paddle.nn.Conv2D(in_channels=1,out_channels=6,kernel_size=5,stride=1)\n",
    "        self.pool1=paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n",
    "        self.conv2=paddle.nn.Conv2D(in_channels=6,out_channels=16,kernel_size=5,stride=1)\n",
    "        self.pool2=paddle.nn.MaxPool2D(kernel_size=2,stride=2)\n",
    "        self.fc1=paddle.nn.Linear(256,120)\n",
    "        self.fc2=paddle.nn.Linear(120,84)\n",
    "        self.fc3=paddle.nn.Linear(84,10)\n",
    "    def forward(self,x):\n",
    "        x=self.conv1(x)\n",
    "        x=F.ReLU(x)\n",
    "        x=self.pool1(x)\n",
    "        x=self.conv2(x)\n",
    "        x=F.ReLU(x)\n",
    "        x=self.pool2(x)\n",
    "        x=paddle.flatten(x,start_axis=1,stop_axis=-1)\n",
    "        x=self.fc1(x)\n",
    "        x=F.ReLU(x)\n",
    "        x=self.fc2(x)\n",
    "        x=F.ReLU(x)\n",
    "        x=self.fc3(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(model):\n",
    "    model.train()\n",
    "    opt=paddle.optimizer.SGD(learning_rate=0.01,parameters=model.parameters())\n",
    "    EPOCH_NUM=5\n",
    "    for epoch_id in range(EPOCH_NUM):\n",
    "        for batch_id,data in enumerate(train_loader()):\n",
    "            images,labels=data\n",
    "            images=paddle.to_tensor(images)\n",
    "            labels=paddle.to_tensor(labels)\n",
    "            predicts=model(images)\n",
    "            loss=F.cross_entropy(predicts,labels)\n",
    "            loss=F.softmax_with_cross_entropy(predicts,labels)\n",
    "            avg_loss=paddle.mean(loss)\n",
    "            if batch_id%200==0:\n",
    "                print(\"epoch:{},batch:{},loss is:{}\".format(epoch_id,batch_id,avg_loss.numpy()))\n",
    "            avg_loss.backward()\n",
    "            opt.stpe()\n",
    "            opt.clear_grad()\n",
    "            model.eval()\n",
    "            accuracies=[]\n",
    "            losses=[]\n",
    "            for batch_id,data in enumerate(valid_loader()):\n",
    "                images,labels=data\n",
    "                images=paddle.to_tensor(images)\n",
    "                labels=paddle.to_tensor(labels)\n",
    "                logits=model(images)\n",
    "                pred=F.softmax(logits)\n",
    "                loss=F.softmax_with_cross_entropy(logits,labels)\n",
    "                acc=paddle.metric.accuracy(pred,labbels)\n",
    "                accuracies.append(acc.numpy())\n",
    "                losses.append(loss.numpy())\n",
    "                print(\"[validation]accuracy/loss:{}/{}\".format(np.mean(accuracies).np.mean(losses)))\n",
    "            model.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Build the batch-generator factories and train the CNN from scratch.\n",
     "train_loader=load_data('train')\n",
     "valid_loader=load_data('valid')\n",
     "model=LeNetModel()\n",
     "train(model)"
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "paddle.save(model.stalte_dict(),'mnist-cnn.pdparams')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from PIL import image\n",
    "import numpy as np\n",
    "im=image.open('data/0.jpg').convert('L')\n",
    "im=imresize((28,28),image.ANTIALIAS)\n",
    "img=np.array(im).reshape(1,1,28,28).astype('float32')\n",
    "img=1.0-img/255."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "import matplotlib.pyplot as plt\n",
     "# Preview the preprocessed grayscale digit before running inference.\n",
     "plt.figure(figsize=(2,2))\n",
     "plt.imshow(im,cmap=plt.cm.binary)\n",
     "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "model=LeNetModel()\n",
    "params_file_path='mnist-cnn.pdparams'\n",
    "param_dict=paddle.load(params_file_path)\n",
    "model.load_dict(param_dict)\n",
    "model.eval()\n",
    "tensor_img=img\n",
    "results=model(paddle.to_tensor(tensor_img))\n",
    "lab=np.argsort(results.numpy())\n",
    "print(\"本次预测的数字是:\",lab[0][-1])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.0"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
