{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "## 任务描述：\n",
    "\n",
    "### 如何根据图像的视觉内容为图像赋予一个语义类别是**图像分类**的目标，也是图像检索、图像内容分析和目标识别等问题的基础。\n",
    "\n",
    "### 实践内容：利用飞桨动态图搭建一个**全连接神经网络**，对包含不同车辆的图像进行分类。\n",
    "数据说明：标签值 1=“汽车”，2=“摩托车”，3=“货车”\n",
    "\n",
    "### 特别提示：本实践所用数据集均来自互联网，请勿用于商务用途。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "\n",
    "import os\n",
    "import zipfile\n",
    "import random\n",
    "import paddle\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import PIL.Image as Image\n",
    "from paddle.io import Dataset"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Global configuration for the experiment: data paths, model/input sizes,\n",
    "# and training hyperparameters. All downstream cells read from this dict.\n",
    "train_parameters = {\n",
    "    \"input_size\": [3, 120, 120],                             # input image shape (C, H, W)\n",
    "    \"class_dim\": 3,                                          # number of classes\n",
    "    \"src_path\":\"/home/aistudio/data/data72920/Data.zip\",     # raw dataset archive\n",
    "    \"target_path\":\"/home/aistudio/work/\",                    # directory to unzip into\n",
    "    \"train_list_path\": \"/home/aistudio/data/train.txt\",      # train.txt location\n",
    "    \"eval_list_path\": \"/home/aistudio/data/eval.txt\",        # eval.txt location\n",
    "    \"label_dict\":{'0':'汽车','1':'摩托车','2':'货车'},        # label id -> class name\n",
    "    \"num_epochs\": 5,                                        # training epochs\n",
    "    \"train_batch_size\": 8,                                   # batch size for training\n",
    "    \"learning_strategy\": {                                   # optimizer settings\n",
    "        \"lr\": 0.001                                          # learning rate\n",
    "    }, \n",
    "    'skip_steps': 50,                                        # log every N batches\n",
    "    'save_steps': 500,                                       # checkpoint every N batches\n",
    "    \"checkpoints\": \"/home/aistudio/work/checkpoints\"         # checkpoint directory\n",
    "\n",
    "}\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# **一、数据准备**\n",
    "\n",
    "### （1）解压原始数据集\n",
    "\n",
    "### （2）按照比例划分训练集与验证集\n",
    "\n",
    "### （3）乱序，生成数据列表\n",
    "\n",
    "### （4）定义数据读取器"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def unzip_data(src_path, target_path):\n",
    "    \"\"\"Extract the raw dataset zip at ``src_path`` into ``target_path``.\n",
    "\n",
    "    Skips extraction when ``target_path/Data`` already exists, so the call\n",
    "    is idempotent across notebook re-runs.\n",
    "    \"\"\"\n",
    "    if not os.path.isdir(os.path.join(target_path, 'Data')):\n",
    "        # Context manager guarantees the archive is closed even if\n",
    "        # extractall raises (the original leaked the handle on error).\n",
    "        with zipfile.ZipFile(src_path, 'r') as z:\n",
    "            z.extractall(path=target_path)\n",
    "        print('数据集解压完成')\n",
    "    else:\n",
    "        print('文件已存在')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "\r\n",
    "# NOTE(review): this duplicates the config-driven unzip_data(src_path, target_path)\r\n",
    "# call in a later cell; unzip_data skips when work/Data already exists, so it is harmless.\r\n",
    "unzip_data('data/data72920/Data.zip','/home/aistudio/work/')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def get_data_list(target_path, train_list_path, eval_list_path):\n",
    "    \"\"\"Build shuffled train/eval list files from the extracted images.\n",
    "\n",
    "    Scans ``target_path/Data`` (file names look like ``<label>_<id>.png`` with\n",
    "    1-based labels), writes one ``<img_path>`` TAB ``<label>`` line per image,\n",
    "    and routes every 10th sample to the eval list, the rest to the train list.\n",
    "\n",
    "    Note: lines are appended ('a'); callers are expected to truncate the two\n",
    "    list files first (as the next cell does).\n",
    "    \"\"\"\n",
    "    # Use the target_path argument (the original hard-coded 'work/Data',\n",
    "    # silently ignoring the parameter).\n",
    "    data_dir = os.path.join(target_path, 'Data')\n",
    "    all_data_list = []\n",
    "\n",
    "    for im in os.listdir(data_dir):\n",
    "        img_path = os.path.join(data_dir, im)\n",
    "        # File names carry 1-based class ids; shift to 0-based labels.\n",
    "        img_label = str(int(im.split('_')[0]) - 1)\n",
    "        all_data_list.append(img_path + '\\t' + img_label + '\\n')\n",
    "\n",
    "    # Shuffle before splitting so the eval split is a random sample.\n",
    "    random.shuffle(all_data_list)\n",
    "\n",
    "    with open(train_list_path, 'a') as f1:\n",
    "        with open(eval_list_path, 'a') as f2:\n",
    "            for ind, img_path_label in enumerate(all_data_list):\n",
    "                # Every 10th sample goes to the eval set (90/10 split).\n",
    "                if ind % 10 == 0:\n",
    "                    f2.write(img_path_label)\n",
    "                else:\n",
    "                    f1.write(img_path_label)\n",
    "    print('生成数据列表完成！')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Pull the relevant paths out of the global config.\n",
    "src_path = train_parameters['src_path']\n",
    "target_path = train_parameters['target_path']\n",
    "train_list_path = train_parameters['train_list_path']\n",
    "eval_list_path = train_parameters['eval_list_path']\n",
    "\n",
    "# Unzip the raw dataset to the target directory (no-op if already extracted).\n",
    "unzip_data(src_path, target_path)\n",
    "\n",
    "# Clear train.txt and eval.txt before get_data_list appends to them.\n",
    "# Mode 'w' already truncates on open, so the explicit seek(0)/truncate()\n",
    "# of the original version was redundant.\n",
    "open(train_list_path, 'w').close()\n",
    "open(eval_list_path, 'w').close()\n",
    "\n",
    "# Generate the (shuffled) data lists.\n",
    "get_data_list(target_path, train_list_path, eval_list_path)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "class dataset(Dataset):\n",
    "    \"\"\"Reads the images listed in train.txt / eval.txt for the classifier.\"\"\"\n",
    "\n",
    "    def __init__(self, data_path, mode='train'):\n",
    "        \"\"\"\n",
    "        :param data_path: directory containing train.txt and eval.txt\n",
    "        :param mode: 'train' reads train.txt, anything else reads eval.txt\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.data_path = data_path\n",
    "        self.img_paths = []\n",
    "        self.labels = []\n",
    "\n",
    "        # Both list files share the '<img_path>\\t<label>' line format, so one\n",
    "        # code path handles both modes (the original duplicated this block).\n",
    "        list_name = \"train.txt\" if mode == 'train' else \"eval.txt\"\n",
    "        with open(os.path.join(self.data_path, list_name), \"r\", encoding=\"utf-8\") as f:\n",
    "            self.info = f.readlines()\n",
    "        for img_info in self.info:\n",
    "            img_path, label = img_info.strip().split('\\t')\n",
    "            self.img_paths.append(img_path)\n",
    "            self.labels.append(int(label))\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        \"\"\"Return one sample.\n",
    "\n",
    "        :param index: sample index\n",
    "        :return: (float32 CHW array scaled to [0, 1], int64 label of shape [1])\n",
    "        \"\"\"\n",
    "        img_path = self.img_paths[index]\n",
    "        img = Image.open(img_path)\n",
    "        if img.mode != 'RGB':\n",
    "            img = img.convert('RGB')\n",
    "        # NOTE(review): no resize here — assumes the source images are already\n",
    "        # 120x120 (the shape MyDNN expects); confirm against the dataset.\n",
    "        img = np.array(img).astype('float32')\n",
    "        img = img.transpose((2, 0, 1)) / 255  # HWC -> CHW, scale to [0, 1]\n",
    "        label = self.labels[index]\n",
    "        label = np.array([label], dtype=\"int64\")\n",
    "        return img, label\n",
    "\n",
    "    def print_sample(self, index: int = 0):\n",
    "        # Quick human-readable dump of one list entry (path + label).\n",
    "        print(\"文件名\", self.img_paths[index], \"\\t标签值\", self.labels[index])\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.img_paths)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "\n",
    "# Training data loader (reshuffled each epoch).\n",
    "train_dataset = dataset('/home/aistudio/data',mode='train')\n",
    "train_loader = paddle.io.DataLoader(train_dataset, \n",
    "                                    batch_size=train_parameters['train_batch_size'], \n",
    "                                    shuffle=True\n",
    "                                    )\n",
    "# Evaluation data loader (fixed order so results are reproducible).\n",
    "eval_dataset = dataset('/home/aistudio/data',mode='eval')\n",
    "eval_loader = paddle.io.DataLoader(eval_dataset,\n",
    "                                   batch_size=train_parameters['train_batch_size'], \n",
    "                                   shuffle=False\n",
    "                                   )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Sanity-check the datasets: sample entries, split sizes, tensor shapes.\n",
    "train_dataset.print_sample(200)\n",
    "print(len(train_dataset))  # idiomatic len() instead of .__len__()\n",
    "eval_dataset.print_sample(0)\n",
    "print(len(eval_dataset))\n",
    "# Indexing invokes __getitem__; [0] is the image array, [1] the label.\n",
    "print(eval_dataset[10][0].shape)\n",
    "print(eval_dataset[10][1].shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# **二、模型配置**\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 深度神经网络(DNN)\n",
    "\n",
    "### **深度神经网络（Deep Neural Networks，简称DNN）是深度学习的基础，其结构为input、hidden（可有多层）、output，每层均为全连接。**\n",
    "![](https://ai-studio-static-online.cdn.bcebos.com/c60fc28848cf469fa3a7824aa637a03f3b2b213ce7b84659919cb24b4430bffb)\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "class MyDNN(paddle.nn.Layer):\r\n",
    "    \"\"\"Fully-connected classifier: 3*120*120 -> 4096 -> 2048 -> 1024 -> 3 logits.\"\"\"\r\n",
    "\r\n",
    "    def __init__(self):\r\n",
    "        super(MyDNN, self).__init__()\r\n",
    "        # Attribute names (linear1..4, relu1..3) are kept stable so saved\r\n",
    "        # state_dict checkpoints remain loadable.\r\n",
    "        self.linear1 = paddle.nn.Linear(in_features=3*120*120, out_features=4096)\r\n",
    "        self.relu1 = paddle.nn.ReLU()\r\n",
    "        self.linear2 = paddle.nn.Linear(in_features=4096, out_features=2048)\r\n",
    "        self.relu2 = paddle.nn.ReLU()\r\n",
    "        self.linear3 = paddle.nn.Linear(in_features=2048, out_features=1024)\r\n",
    "        self.relu3 = paddle.nn.ReLU()\r\n",
    "        self.linear4 = paddle.nn.Linear(in_features=1024, out_features=3)\r\n",
    "\r\n",
    "    def forward(self, input):\r\n",
    "        \"\"\"Flatten an (N, 3, 120, 120) batch and run the MLP; returns logits.\"\"\"\r\n",
    "        # -1 lets reshape infer the batch dimension from the element count.\r\n",
    "        x = paddle.reshape(input, shape=[-1, 3*120*120])\r\n",
    "        for fc, act in ((self.linear1, self.relu1),\r\n",
    "                        (self.linear2, self.relu2),\r\n",
    "                        (self.linear3, self.relu3)):\r\n",
    "            x = act(fc(x))\r\n",
    "        # Final layer emits raw logits; softmax is applied by the loss/predict code.\r\n",
    "        return self.linear4(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 三、模型训练"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "\n",
    "def draw_process(title, color, iters, data, label):\n",
    "    \"\"\"Plot one training curve (loss or accuracy) against iteration count.\"\"\"\n",
    "    plt.plot(iters, data, color=color, label=label)\n",
    "    plt.title(title, fontsize=24)\n",
    "    plt.xlabel(\"iter\", fontsize=20)\n",
    "    plt.ylabel(label, fontsize=20)\n",
    "    plt.legend()\n",
    "    plt.grid()\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Training loop: cross-entropy loss + Adam, periodic logging and checkpointing.\n",
    "model = MyDNN()\n",
    "model.train()\n",
    "cross_entropy = paddle.nn.CrossEntropyLoss()\n",
    "optimizer = paddle.optimizer.Adam(learning_rate=train_parameters['learning_strategy']['lr'],\n",
    "                                  parameters=model.parameters())\n",
    "\n",
    "# Make sure the checkpoint directory exists before the first paddle.save.\n",
    "os.makedirs(train_parameters[\"checkpoints\"], exist_ok=True)\n",
    "\n",
    "steps = 0\n",
    "Iters, total_loss, total_acc = [], [], []\n",
    "\n",
    "for epo in range(train_parameters['num_epochs']):\n",
    "    for _, data in enumerate(train_loader()):\n",
    "        steps += 1\n",
    "        x_data = data[0]\n",
    "        y_data = data[1]\n",
    "        predicts = model(x_data)\n",
    "        loss = cross_entropy(predicts, y_data)\n",
    "        acc = paddle.metric.accuracy(predicts, y_data)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "        optimizer.clear_grad()\n",
    "        if steps % train_parameters[\"skip_steps\"] == 0:\n",
    "            Iters.append(steps)\n",
    "            # float() handles both 1-element and 0-d tensors; .numpy()[0]\n",
    "            # raises IndexError on newer Paddle where losses are 0-d.\n",
    "            total_loss.append(float(loss))\n",
    "            total_acc.append(float(acc))\n",
    "            # Print intermediate progress.\n",
    "            print('epo: {}, step: {}, loss is: {}, acc is: {}'\\\n",
    "                  .format(epo, steps, loss.numpy(), acc.numpy()))\n",
    "        # Periodically checkpoint the model parameters.\n",
    "        if steps % train_parameters[\"save_steps\"] == 0:\n",
    "            save_path = train_parameters[\"checkpoints\"]+\"/\"+\"save_dir_\" + str(steps) + '.pdparams'\n",
    "            print('save model to: ' + save_path)\n",
    "            paddle.save(model.state_dict(), save_path)\n",
    "paddle.save(model.state_dict(), train_parameters[\"checkpoints\"]+\"/\"+\"save_dir_final.pdparams\")\n",
    "# Typo fix: plot labels previously read \"trainning\".\n",
    "draw_process(\"training loss\", \"red\", Iters, total_loss, \"training loss\")\n",
    "draw_process(\"training acc\", \"green\", Iters, total_acc, \"training acc\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 四、模型评估"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "'''\n",
    "模型评估\n",
    "'''\n",
    "model__state_dict = paddle.load('work/checkpoints/save_dir_final.pdparams')\n",
    "model_eval = MyDNN()\n",
    "model_eval.set_state_dict(model__state_dict) \n",
    "model_eval.eval()\n",
    "\n",
    "accs = []\n",
    "for _, data in enumerate(eval_loader()):\n",
    "    x_data = data[0]\n",
    "    y_data = data[1]\n",
    "    predicts = model_eval(x_data)\n",
    "    acc = paddle.metric.accuracy(predicts, y_data)\n",
    "    accs.append(acc.numpy()[0])\n",
    "print('模型在验证集上的准确率为：',np.mean(accs))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# **五、模型预测**"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def load_image(img_path):\n",
    "    \"\"\"Preprocess one image for prediction.\n",
    "\n",
    "    Opens the file, forces RGB, resizes to the network's 120x120 input,\n",
    "    and returns a float32 CHW array scaled to [0, 1].\n",
    "    \"\"\"\n",
    "    img = Image.open(img_path)\n",
    "    if img.mode != 'RGB':\n",
    "        img = img.convert('RGB')\n",
    "    # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the same filter\n",
    "    # (ANTIALIAS was an alias for it), so results are unchanged.\n",
    "    img = img.resize((120, 120), Image.LANCZOS)\n",
    "    img = np.array(img).astype('float32')\n",
    "    img = img.transpose((2, 0, 1)) / 255  # HWC -> CHW, scale to [0, 1]\n",
    "    return img"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Load the final checkpoint and run a single-image prediction.\n",
    "model__state_dict = paddle.load('work/checkpoints/save_dir_final.pdparams')\n",
    "model_predict = MyDNN()\n",
    "model_predict.set_state_dict(model__state_dict) \n",
    "model_predict.eval()\n",
    "infer_path='work/Data/1_100.png'\n",
    "infer_img = Image.open(infer_path)\n",
    "plt.imshow(infer_img)          # display the image to be classified\n",
    "plt.show()                     # render the figure\n",
    "# Preprocess the image (RGB, 120x120, CHW, [0, 1]) for the network\n",
    "infer_img = load_image(infer_path)\n",
    "# print(type(infer_img))\n",
    "infer_img = infer_img[np.newaxis,:, : ,:]  # add batch dim -> (1, 3, 120, 120)\n",
    "infer_img = paddle.to_tensor(infer_img)\n",
    "results = model_predict(infer_img)\n",
    "print(results)\n",
    "results = paddle.nn.functional.softmax(results)\n",
    "print(results)\n",
    "# Indices follow label_dict: 0 = car, 1 = motorcycle, 2 = truck\n",
    "print(\"汽车:{:.2f},摩托车:{:.2f}，货车:{:.2f}\" .format(results.numpy()[0][0],\n",
    "                                                    results.numpy()[0][1],\n",
    "                                                    results.numpy()[0][2]))\n",
    "\n",
    "                                                        "
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
