{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# PaddleCamp_第二期\n",
    "\n",
    "## 第二周 - 第一次作业参考答案\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "该作业共 **1** 部分：\n",
    "\n",
    "----\n",
    "* 代码填空题 **参考答案**："
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Besides code, this lab contains many knowledge-point explanations.\r\n",
    "# They are written in markdown and include many images; run this command\r\n",
    "# once and refresh the page so the images can be displayed.\r\n",
    "# NOTE(review): NEW_NAME is computed but never used afterwards -- confirm it is still needed.\r\n",
    "!DATA_PATH=data/data7141/ && NEW_NAME=$(find -name *[0-9].ipynb) && NEW_NAME=${NEW_NAME%.*} && NEW_NAME=${NEW_NAME#./} && unzip -o ${DATA_PATH}cat_logist.zip  && cp -rf cat_logist/. ."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "import sys\r\n",
    "import numpy as np\r\n",
    "\r\n",
    "import lr_utils\r\n",
    "import matplotlib.pyplot as plt\r\n",
    "\r\n",
    "import paddle\r\n",
    "import paddle.fluid as fluid\r\n",
    "\r\n",
    "from paddle.utils.plot import Ploter\r\n",
    "%matplotlib inline\r\n",
    "\r\n",
    "\r\n",
    "# Call load_dataset() to read the train/test images, their labels and the class names.\r\n",
    "train_set_x_orig, train_set_y, test_set_x_orig, test_set_y, classes = lr_utils.load_dataset()\r\n",
    "\r\n",
    "# Show one sample image.\r\n",
    "# The image at index 23 should be labelled \"non-cat\".\r\n",
    "index = 23\r\n",
    "plt.imshow(train_set_x_orig[index])\r\n",
    "print (\"y = \" + str(train_set_y[:, index]) + \", it's a '\" + classes[np.squeeze(train_set_y[:, index])].decode(\"utf-8\") +  \"' picture.\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "### START CODE HERE ### (≈ 3 lines of code)\n",
    "# m_train / m_test: number of training / test examples (first array axis).\n",
    "# num_px: image height in pixels (images are square, per the print below).\n",
    "m_train = train_set_x_orig.shape[0]\n",
    "m_test = test_set_x_orig.shape[0]\n",
    "num_px = train_set_x_orig.shape[1]\n",
    "\n",
    "### END CODE HERE ###\n",
    "\n",
    "print (\"训练样本数: m_train = \" + str(m_train))\n",
    "print (\"测试样本数: m_test = \" + str(m_test))\n",
    "print (\"图片高度/宽度: num_px = \" + str(num_px))\n",
    "print (\"图片大小: (\" + str(num_px) + \", \" + str(num_px) + \", 3)\")\n",
    "print (\"train_set_x shape: \" + str(train_set_x_orig.shape))\n",
    "print (\"train_set_y shape: \" + str(train_set_y.shape))\n",
    "print (\"test_set_x shape: \" + str(test_set_x_orig.shape))\n",
    "print (\"test_set_y shape: \" + str(test_set_y.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Flattened feature dimension: height * width * 3 color channels.\n",
    "DATA_DIM = num_px * num_px * 3\n",
    "\n",
    "# Reshape each (num_px, num_px, 3) image into a single row vector.\n",
    "\n",
    "### START CODE HERE ### (≈ 2 lines of code)\n",
    "train_set_x_flatten = train_set_x_orig.reshape(train_set_x_orig.shape[0], -1)\n",
    "test_set_x_flatten = test_set_x_orig.reshape(test_set_x_orig.shape[0], -1)\n",
    "### END CODE HERE ###\n",
    "\n",
    "print (\"train_set_x_flatten shape: \" + str(train_set_x_flatten.shape))\n",
    "print (\"train_set_y shape: \" + str(train_set_y.shape))\n",
    "print (\"test_set_x_flatten shape: \" + str(test_set_x_flatten.shape))\n",
    "print (\"test_set_y shape: \" + str(test_set_y.shape))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "### START CODE HERE ### (≈ 2 lines of code)\n",
    "# Normalize pixel values from [0, 255] to [0, 1].\n",
    "train_set_x = train_set_x_flatten / 255\n",
    "test_set_x = test_set_x_flatten / 255\n",
    "\n",
    "### END CODE HERE ###\n",
    "\n",
    "# Append the labels as the last column: each row is [features..., label].\n",
    "train_set = np.hstack((train_set_x, train_set_y.T))\n",
    "test_set = np.hstack((test_set_x, test_set_y.T))\n",
    "\n",
    "# Read the training or test data.\n",
    "def read_data(data_set):\n",
    "    \"\"\"\n",
    "        Build a reader over a data set.\n",
    "        Args:\n",
    "            data_set -- the data set to read; each row is [features..., label]\n",
    "        Return:\n",
    "            reader -- a generator function yielding (features, label) pairs\n",
    "    \"\"\"\n",
    "    def reader():\n",
    "        \"\"\"\n",
    "        The actual reader generator.\n",
    "        Args:\n",
    "        Return:\n",
    "            data[:-1], data[-1] -- yields one (features, label) pair per row:\n",
    "            data[:-1] is the first n-1 elements (the features), data[-1] is the last element (the label)\n",
    "        \"\"\"\n",
    "        for data in data_set:\n",
    "            ### START CODE HERE ### (≈ 1 lines of code)\n",
    "            yield data[:-1], data[-1]\n",
    "            ### END CODE HERE ###\n",
    "    return reader\n",
    "\n",
    "# Small sanity check of read_data on a toy array.\n",
    "test_array = [[1,1,1,1,0],\n",
    "                [2,2,2,2,1],\n",
    "                [3,3,3,3,0]]\n",
    "\n",
    "print(\"test_array for read_data:\")\n",
    "for value in read_data(test_array)():\n",
    "    print(value)\n",
    "    \n",
    "BATCH_SIZE=200\n",
    "\n",
    "# Training reader: shuffle within a 500-sample buffer, then batch.\n",
    "train_reader = paddle.batch(\n",
    "    paddle.reader.shuffle(\n",
    "        read_data(train_set), buf_size=500),\n",
    "    batch_size=BATCH_SIZE)\n",
    "\n",
    "# Test reader, built the same way.\n",
    "test_reader = paddle.batch(\n",
    "    paddle.reader.shuffle(\n",
    "        read_data(test_set), buf_size=500),\n",
    "    batch_size=BATCH_SIZE)\n",
    "    \n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Network definition: a single fully-connected layer with softmax over 2\n",
    "# classes (cat / non-cat), i.e. logistic regression on the flattened pixels.\n",
    "# The BATCH_SIZE / train_reader / test_reader setup that used to be duplicated\n",
    "# here was removed -- it is already defined, identically, in the previous cell.\n",
    "\n",
    "### START CODE HERE ### (≈ 3 lines of code) \n",
    "x = fluid.layers.data(name='x', shape=[DATA_DIM], dtype='float32')\n",
    "y_predict = fluid.layers.fc(input=x, size=2, act='softmax') \n",
    "y = fluid.layers.data(name='y', shape=[1], dtype='int64') \n",
    "### END CODE HERE ###   \n",
    "\n",
    "# Cross-entropy loss, its mean over the batch, and classification accuracy.\n",
    "cost = fluid.layers.cross_entropy(input=y_predict, label=y)\n",
    "avg_loss = fluid.layers.mean(cost)\n",
    "acc = fluid.layers.accuracy(input=y_predict, label=y)\n",
    "\n",
    "startup_program = fluid.default_startup_program()\n",
    "main_program = fluid.default_main_program()\n",
    "\n",
    "# Clone main_program to get test_program.\n",
    "# Some operators behave differently between training and testing (e.g.\n",
    "# batch_norm); for_test=True marks the cloned program as a test program.\n",
    "# The clone keeps all operators; call it before backward/optimization.\n",
    "test_program = fluid.default_main_program().clone(for_test=True)\n",
    "\n",
    "optimizer = fluid.optimizer.Adam(learning_rate=0.001)\n",
    "optimizer.minimize(avg_loss)\n",
    "print(\"optimizer is ready\")\n",
    "\n",
    "# Train on CPU.\n",
    "use_cuda = False\n",
    "place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()\n",
    "\n",
    "exe = fluid.Executor(place)\n",
    "\n",
    "save_dirname=\"recognize_cat_inference.model\"\n",
    "\n",
    "train_prompt = \"Train cost\"\n",
    "cost_ploter = Ploter(train_prompt)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Plot the training progress as a live curve.\n",
    "def event_handler_plot(ploter_title, step, cost):\n",
    "    # Append the (step, cost) point and redraw the curve.\n",
    "    cost_ploter.append(ploter_title, step, cost)\n",
    "    cost_ploter.plot()\n",
    "    \n",
    "    \n",
    "def train_test(train_test_program, train_test_feed, train_test_reader):\n",
    "    \"\"\"Evaluate the model over a whole reader; return (mean loss, mean accuracy).\"\"\"\n",
    "    # Per-batch classification accuracies collected in acc_set.\n",
    "    acc_set = []\n",
    "    # Per-batch mean losses collected in avg_loss_set.\n",
    "    avg_loss_set = []\n",
    "    # Feed every batch the test reader yields through the network.\n",
    "    for test_data in train_test_reader():\n",
    "        acc_np, avg_loss_np = exe.run(\n",
    "            program=train_test_program,\n",
    "            feed=train_test_feed.feed(test_data),\n",
    "            fetch_list=[acc, avg_loss])\n",
    "        acc_set.append(float(acc_np))\n",
    "        avg_loss_set.append(float(avg_loss_np))\n",
    "    \n",
    "    # Average accuracy and loss over all batches of the test data.\n",
    "    acc_val_mean = np.array(acc_set).mean()\n",
    "    avg_loss_val_mean = np.array(avg_loss_set).mean()\n",
    "    # Return (mean loss, mean accuracy).\n",
    "    return avg_loss_val_mean, acc_val_mean"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Main training loop: PASS_NUM epochs over train_reader, evaluating on the\n",
    "# test set after every epoch and saving the inference model each time.\n",
    "feeder = fluid.DataFeeder(place=place, feed_list=[x, y])\n",
    "exe.run(startup_program)\n",
    "\n",
    "PASS_NUM = 150\n",
    "epochs = [epoch_id for epoch_id in range(PASS_NUM)]\n",
    "\n",
    "# One (epoch_id, avg_loss, acc) tuple per epoch; used to pick the best pass.\n",
    "lists = []\n",
    "\n",
    "step = 0\n",
    "for epoch_id in epochs:\n",
    "    for step_id, data in enumerate(train_reader()):\n",
    "        metrics = exe.run(\n",
    "            main_program,\n",
    "            feed=feeder.feed(data),\n",
    "            fetch_list=[avg_loss,acc])\n",
    "        # Plot the training cost curve every 10 steps.\n",
    "        if step % 10 == 0:\n",
    "            event_handler_plot(train_prompt, step, metrics[0])\n",
    "        step += 1\n",
    "\n",
    "    # Evaluate the classifier on the test set after each epoch.\n",
    "    avg_loss_val, acc_val = train_test(\n",
    "        train_test_program=test_program,\n",
    "        train_test_reader=test_reader,\n",
    "        train_test_feed=feeder)\n",
    "\n",
    "    print(\"Test with Epoch %d, avg_cost: %s, acc: %s\" %\n",
    "          (epoch_id, avg_loss_val, acc_val))\n",
    "    lists.append((epoch_id, avg_loss_val, acc_val))\n",
    "    \n",
    "    # Save the trained parameters for later inference.\n",
    "    if save_dirname is not None:\n",
    "        fluid.io.save_inference_model(save_dirname, [\"x\"], [y_predict], exe)\n",
    "\n",
    "# Pick the epoch with the lowest test loss. min() replaces sorted(...)[0];\n",
    "# the previous lambda parameter also shadowed the builtin 'list'.\n",
    "best = min(lists, key=lambda record: float(record[1]))\n",
    "print('Best pass is %s, testing Avgcost is %s' % (best[0], best[1]))\n",
    "print('The classification accuracy is %.2f%%' % (float(best[2]) * 100))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# Run inference with the saved model and measure accuracy on the test set.\n",
    "# A dedicated scope keeps inference variables out of the global scope.\n",
    "inference_scope = fluid.core.Scope()\n",
    "with fluid.scope_guard(inference_scope):\n",
    "\n",
    "    # fluid.io.load_inference_model returns the inference program,\n",
    "    # feed_target_names (names of variables to feed into the network) and\n",
    "    # fetch_targets (variables to fetch from the network).\n",
    "    [inference_program, feed_target_names,\n",
    "     fetch_targets] = fluid.io.load_inference_model(\n",
    "         save_dirname, exe)\n",
    "\n",
    "    # Accumulate correct/total counts over ALL mini-batches.\n",
    "    # BUG FIX: the original counted only the last mini-batch, because it used\n",
    "    # the loop variables `label` / `mini_batch_result` after the loop ended.\n",
    "    right_counter = 0\n",
    "    total_counter = 0\n",
    "\n",
    "    # Predict one mini-batch per iteration; the feed is a dict\n",
    "    # {feed_target_name: data} and the result list matches fetch_targets.\n",
    "    for mini_batch in test_reader():\n",
    "        test_x = np.array([data[0] for data in mini_batch]).astype(\"float32\")\n",
    "        test_y = np.array([data[1] for data in mini_batch]).astype(\"int64\")\n",
    "        # Run the actual prediction.\n",
    "        mini_batch_result = exe.run(\n",
    "            inference_program,\n",
    "            feed={feed_target_names[0]: test_x},\n",
    "            fetch_list=fetch_targets)\n",
    "        \n",
    "        # Print the predictions: argsort each row ascending, then take the\n",
    "        # last column, i.e. the class index with the highest probability.\n",
    "        mini_batch_result = np.argsort(mini_batch_result)\n",
    "        mini_batch_result = mini_batch_result[0][:, -1]\n",
    "        print('预测结果：%s'%mini_batch_result)\n",
    "\n",
    "        # Print the ground-truth labels.\n",
    "        label = test_y\n",
    "        print('真实结果：%s'%label)\n",
    "\n",
    "        # Count correct predictions for this mini-batch.\n",
    "        for pred, truth in zip(mini_batch_result, label):\n",
    "            if pred == truth:\n",
    "                right_counter += 1\n",
    "        total_counter += len(label)\n",
    "    \n",
    "    ratio = (right_counter/total_counter)\n",
    "    print(\"准确率为：%.2f%%\"%(ratio*100))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 1.5.0 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
