{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "本节介绍如何使用飞桨在多GPU上训练神经网络模型，在启动训练前，加载数据和网络结构的代码部分均不变。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "# 加载相关库\n",
    "import os\n",
    "import random\n",
    "import paddle\n",
    "import paddle.fluid as fluid\n",
    "from paddle.fluid.dygraph.nn import Conv2D, Pool2D, FC\n",
    "import numpy as np\n",
    "from PIL import Image\n",
    "\n",
    "import gzip\n",
    "import json\n",
    "\n",
    "# Define the dataset reader\n",
    "def load_data(mode='train'):\n",
    "\n",
    "    # Read the data file\n",
    "    datafile = './work/mnist.json.gz'\n",
    "    print('loading mnist dataset from {} ......'.format(datafile))\n",
    "    data = json.load(gzip.open(datafile))\n",
    "    # Split the dataset into training, validation and test sets\n",
    "    train_set, val_set, eval_set = data\n",
    "\n",
    "    # Dataset parameters: image height IMG_ROWS, image width IMG_COLS\n",
    "    IMG_ROWS = 28\n",
    "    IMG_COLS = 28\n",
    "    # Select the training, validation or test set based on the mode argument\n",
    "    if mode == 'train':\n",
    "        imgs = train_set[0]\n",
    "        labels = train_set[1]\n",
    "    elif mode == 'valid':\n",
    "        imgs = val_set[0]\n",
    "        labels = val_set[1]\n",
    "    elif mode == 'eval':\n",
    "        imgs = eval_set[0]\n",
    "        labels = eval_set[1]\n",
    "    # Total number of images\n",
    "    imgs_length = len(imgs)\n",
    "    # Verify that the number of images matches the number of labels\n",
    "    assert len(imgs) == len(labels), \\\n",
    "          \"length of train_imgs({}) should be the same as train_labels({})\".format(\n",
    "                  len(imgs), len(labels))\n",
    "\n",
    "    index_list = list(range(imgs_length))\n",
    "\n",
    "    # Batch size used when reading the data\n",
    "    BATCHSIZE = 100\n",
    "\n",
    "    # Define the data generator\n",
    "    def data_generator():\n",
    "        # In training mode, shuffle the sample order\n",
    "        if mode == 'train':\n",
    "            random.shuffle(index_list)\n",
    "        imgs_list = []\n",
    "        labels_list = []\n",
    "        # Read samples by index\n",
    "        for i in index_list:\n",
    "            # Read one image and its label; reshape and cast their types\n",
    "            img = np.reshape(imgs[i], [1, IMG_ROWS, IMG_COLS]).astype('float32')\n",
    "            label = np.reshape(labels[i], [1]).astype('int64')\n",
    "            imgs_list.append(img) \n",
    "            labels_list.append(label)\n",
    "            # Once the buffer reaches the batch size, yield one batch\n",
    "            if len(imgs_list) == BATCHSIZE:\n",
    "                yield np.array(imgs_list), np.array(labels_list)\n",
    "                # Clear the data buffers\n",
    "                imgs_list = []\n",
    "                labels_list = []\n",
    "\n",
    "        # If fewer than BATCHSIZE samples remain, yield them together\n",
    "        # as one final mini-batch of size len(imgs_list)\n",
    "        if len(imgs_list) > 0:\n",
    "            yield np.array(imgs_list), np.array(labels_list)\n",
    "\n",
    "    return data_generator\n",
    "\n",
    "\n",
    "# Define the model structure\n",
    "class MNIST(fluid.dygraph.Layer):\n",
    "    def __init__(self, name_scope):\n",
    "        super(MNIST, self).__init__(name_scope)\n",
    "        name_scope = self.full_name()\n",
    "        # Conv layer: 20 output channels, kernel size 5, stride 1, padding 2, relu activation\n",
    "        self.conv1 = Conv2D(name_scope, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')\n",
    "        # Pooling layer: pool size 2, max pooling\n",
    "        self.pool1 = Pool2D(name_scope, pool_size=2, pool_stride=2, pool_type='max')\n",
    "        # Conv layer: 20 output channels, kernel size 5, stride 1, padding 2, relu activation\n",
    "        self.conv2 = Conv2D(name_scope, num_filters=20, filter_size=5, stride=1, padding=2, act='relu')\n",
    "        # Pooling layer: pool size 2, max pooling\n",
    "        self.pool2 = Pool2D(name_scope, pool_size=2, pool_stride=2, pool_type='max')\n",
    "        # Fully-connected layer: 10 output units, softmax activation\n",
    "        self.fc = FC(name_scope, size=10, act='softmax')\n",
    "\n",
    "    # Forward computation of the network\n",
    "    def forward(self, inputs):\n",
    "        x = self.conv1(inputs)\n",
    "        x = self.pool1(x)\n",
    "        x = self.conv2(x)\n",
    "        x = self.pool2(x)\n",
    "        x = self.fc(x)\n",
    "        return x"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 单GPU训练\n",
    "\n",
    "从前几节的训练来看，我们无论是训练房价预测模型还是MNIST手写字符识别模型，训练好一个模型不会超过十分钟，主要原因是我们所使用的神经网络比较简单。但现实生活中，我们可能会遇到更复杂的机器学习、深度学习任务，需要运算速度更高的硬件（GPU、TPU），甚至同时使用多个机器共同训练一个任务（多卡训练和多机训练）。\n",
    "\n",
    "飞桨动态图通过fluid.dygraph.guard(place=None)里的place参数，设置在GPU上训练还是CPU上训练，比如：\n",
    "```\n",
    "with fluid.dygraph.guard(place=fluid.CPUPlace()) # 设置使用CPU资源训练神经网络。\n",
    "with fluid.dygraph.guard(place=fluid.CUDAPlace(0)) # 设置使用GPU资源训练神经网络，默认使用机器的第一个GPU。\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {
    "collapsed": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "loading mnist dataset from ./work/mnist.json.gz ......\n",
      "epoch: 0, batch: 0, loss is: [2.8070326]\n",
      "epoch: 0, batch: 200, loss is: [0.5153172]\n",
      "epoch: 0, batch: 400, loss is: [0.44996807]\n",
      "epoch: 1, batch: 0, loss is: [0.18519798]\n",
      "epoch: 1, batch: 200, loss is: [0.30971336]\n",
      "epoch: 1, batch: 400, loss is: [0.22442476]\n"
     ]
    }
   ],
   "source": [
    "# Only the first 3 lines differ: set use_gpu to True to train on a GPU\n",
    "use_gpu = False\n",
    "place = fluid.CUDAPlace(0) if use_gpu else fluid.CPUPlace()\n",
    "\n",
    "with fluid.dygraph.guard(place):\n",
    "    model = MNIST(\"mnist\")\n",
    "    model.train()\n",
    "    # Load the data reader\n",
    "    train_loader = load_data('train')\n",
    "    \n",
    "    # Four optimizer choices; try each one to compare the results\n",
    "    optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01)\n",
    "    #optimizer = fluid.optimizer.MomentumOptimizer(learning_rate=0.01)\n",
    "    #optimizer = fluid.optimizer.AdagradOptimizer(learning_rate=0.01)\n",
    "    #optimizer = fluid.optimizer.AdamOptimizer(learning_rate=0.01)\n",
    "    \n",
    "    EPOCH_NUM = 2\n",
    "    for epoch_id in range(EPOCH_NUM):\n",
    "        for batch_id, data in enumerate(train_loader()):\n",
    "            # Prepare the data (much more concise now)\n",
    "            image_data, label_data = data\n",
    "            image = fluid.dygraph.to_variable(image_data)\n",
    "            label = fluid.dygraph.to_variable(label_data)\n",
    "            \n",
    "            # Forward pass\n",
    "            predict = model(image)\n",
    "            \n",
    "            # Compute the loss, averaged over the batch\n",
    "            loss = fluid.layers.cross_entropy(predict, label)\n",
    "            avg_loss = fluid.layers.mean(loss)\n",
    "            \n",
    "            # Print the current loss every 200 batches\n",
    "            # (the original comment said 100, which did not match the code)\n",
    "            if batch_id % 200 == 0:\n",
    "                print(\"epoch: {}, batch: {}, loss is: {}\".format(epoch_id, batch_id, avg_loss.numpy()))\n",
    "            \n",
    "            # Backward pass and parameter update\n",
    "            avg_loss.backward()\n",
    "            optimizer.minimize(avg_loss)\n",
    "            model.clear_gradients()\n",
    "\n",
    "    # Save the model parameters\n",
    "    fluid.save_dygraph(model.state_dict(), 'mnist')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "# 分布式训练\n",
    "\n",
    "在工业实践中，许多较复杂的任务需要使用更强大的模型。强大模型加上海量的训练数据，经常导致模型训练耗时严重。比如在计算机视觉分类任务中，训练一个在ImageNet数据集上精度表现良好的模型，大概需要一周的时间，因为我们需要不断尝试各种优化的思路和方案。如果每次训练均要耗时1周，这会大大降低模型迭代的速度。在机器资源充沛的情况下，我们可以采用分布式训练，大部分模型的训练时间可压缩到小时级别。\n",
    "\n",
    "分布式训练有两种实现模式：模型并行和数据并行。\n",
    "\n",
    "\n",
    "## 1. 模型并行\n",
    "\n",
    "模型并行是将一个网络模型拆分为多份，拆分后的模型分到多个设备上（GPU）训练，每个设备的训练数据是相同的。\n",
    "模型并行的方式一般适用于：\n",
    "1. 模型架构过大，完整的模型无法放入单个GPU。2012年ImageNet大赛的冠军模型AlexNet是模型并行的典型案例。由于当时GPU内存较小，单个GPU不足以承担AlexNet。研究者将AlexNet拆分为两部分放到两个GPU上并行训练。\n",
    "\n",
    "2. 网络模型的设计结构可以并行化时，采用模型并行的方式。例如在计算机视觉目标检测任务中，一些模型（YOLO9000）的边界框回归和类别预测是独立的，可以将独立的部分分在不同的设备节点上完成分布式训练。\n",
    "\n",
    "说明：当前GPU硬件技术快速发展，深度学习使用的主流GPU的内存已经足以满足大多数的网络模型需求，所以大多数情况下使用数据并行的方式。\n",
    "\n",
    "\n",
    "## 2. 数据并行\n",
    "\n",
    "数据并行与模型并行不同，数据并行每次读取多份数据，读取到的数据输入给多个设备（GPU）上的模型，每个设备上的模型是完全相同的。数据并行的方式与众人拾柴火焰高的道理类似，如果把训练数据比喻为砖头，把一个设备（GPU）比喻为一个人，那单GPU训练就是一个人在搬砖，多GPU训练就是多个人同时搬砖，每次搬砖的数量倍数增加，效率呈倍数提升。但是注意到，每个设备的模型是完全相同的，但是输入数据不同，每个设备的模型计算出的梯度是不同的，如果每个设备的梯度更新当前设备的模型就会导致下次训练时，每个模型的参数都不同了，所以我们还需要一个梯度同步机制，保证每个设备的梯度是完全相同的。\n",
    "\n",
    "数据并行中有一个参数管理服务器（parameter server）收集来自每个设备的梯度更新信息，并计算出一个全局的梯度更新。当参数管理服务器收到来自训练设备的梯度更新请求时，统一更新模型的梯度。\n",
    "\n",
    "飞桨有便利的数据并行训练方式，仅改动几行代码即可实现多GPU训练，如果想了解飞桨数据并行的基本思想，可以参考官网文档-[https://www.paddlepaddle.org.cn/documentation/docs/zh/user_guides/howto/training/cluster_howto.html](https://www.paddlepaddle.org.cn/documentation/docs/zh/user_guides/howto/training/cluster_howto.html)。\n",
    "\n",
    "\n",
    "\n",
    "\n",
    "用户只需要对程序进行简单修改，即可实现在多GPU上并行训练。飞桨采用数据并行的实现方式，在训练前，需要配置如下参数：\n",
    "\n",
    "* 1.从环境变量获取设备的ID，并指定给CUDAPlace\n",
    "```\n",
    "  device_id = fluid.dygraph.parallel.Env().dev_id\n",
    "  place = fluid.CUDAPlace(device_id)\n",
    "```\n",
    "* 2.对定义的网络做预处理，设置为并行模式 \n",
    "```\n",
    "  strategy = fluid.dygraph.parallel.prepare_context() ## 新增\n",
    "  model = MNIST(\"mnist\")\n",
    "  model = fluid.dygraph.parallel.DataParallel(model, strategy)  ## 新增\n",
    " ```\n",
    "* 3.定义多GPU训练的reader，将每批次的数据平分到每个GPU上\n",
    "```\n",
    "  valid_loader = paddle.batch(paddle.dataset.mnist.test(), batch_size=16, drop_last=True)\n",
    "  valid_loader = fluid.contrib.reader.distributed_batch_reader(valid_loader)\n",
    "```\n",
    "* 4.收集每批次训练数据的loss，并聚合参数的梯度\n",
    "```\n",
    "  avg_loss = mnist.scale_loss(avg_loss)  ## 新增\n",
    "  avg_loss.backward()\n",
    "  mnist.apply_collective_grads()         ## 新增\n",
    "```\n",
    "\n",
    "完整程序如下所示。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": false
   },
   "outputs": [],
   "source": [
    "def train_multi_gpu():\n",
    "    \n",
    "    ## Change 1: get this process's GPU id from the environment\n",
    "    place = fluid.CUDAPlace(fluid.dygraph.parallel.Env().dev_id)\n",
    "\n",
    "    with fluid.dygraph.guard(place):\n",
    "    \n",
    "        ## Change 2: wrap the model for data-parallel training\n",
    "        strategy = fluid.dygraph.parallel.prepare_context()\n",
    "        model = MNIST(\"mnist\")\n",
    "        model = fluid.dygraph.parallel.DataParallel(model, strategy)\n",
    "\n",
    "        model.train()\n",
    "\n",
    "        # Load the data reader\n",
    "        train_loader = load_data('train')\n",
    "        ## Change 3: distributed reader, so every process reads a distinct shard\n",
    "        train_loader = fluid.contrib.reader.distributed_batch_reader(train_loader)\n",
    "\n",
    "        optimizer = fluid.optimizer.SGDOptimizer(learning_rate=0.01)\n",
    "        EPOCH_NUM = 5\n",
    "        for epoch_id in range(EPOCH_NUM):\n",
    "            for batch_id, data in enumerate(train_loader()):\n",
    "                # Prepare the data\n",
    "                image_data, label_data = data\n",
    "                image = fluid.dygraph.to_variable(image_data)\n",
    "                label = fluid.dygraph.to_variable(label_data)\n",
    "\n",
    "                predict = model(image)\n",
    "\n",
    "                # Classification loss: cross entropy, same as the single-GPU version.\n",
    "                # (square_error_cost is wrong here: labels are int64 class indices.)\n",
    "                loss = fluid.layers.cross_entropy(predict, label)\n",
    "                avg_loss = fluid.layers.mean(loss)\n",
    "\n",
    "                # Change 4: scale the loss and aggregate gradients across devices.\n",
    "                # (Called on `model` — the original `mnist.scale_loss` raised a\n",
    "                # NameError, since no variable named `mnist` exists here.)\n",
    "                avg_loss = model.scale_loss(avg_loss)\n",
    "                avg_loss.backward()\n",
    "                model.apply_collective_grads()\n",
    "                # Minimize the loss and clear this step's gradients\n",
    "                optimizer.minimize(avg_loss)\n",
    "                model.clear_gradients()\n",
    "                \n",
    "                if batch_id % 200 == 0:\n",
    "                    print(\"epoch: {}, batch: {}, loss is: {}\".format(epoch_id, batch_id, avg_loss.numpy()))\n",
    "\n",
    "        # Save the model parameters inside the dygraph guard, matching the\n",
    "        # single-GPU version (typically done on rank 0 only in production)\n",
    "        fluid.save_dygraph(model.state_dict(), 'mnist')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "启动多GPU的训练，需要在命令行设置一些参数变量。打开终端，运行如下命令：\n",
    "\n",
    "```\n",
    "$ python -m paddle.distributed.launch --selected_gpus=0,1,2,3 --log_dir ./mylog train_multi_gpu.py\n",
    "```\n",
    "- paddle.distributed.launch表示启动分布式运行。\n",
    "- 通过selected_gpus设置使用的GPU的序号。当前机器需要是多GPU卡的机器，通过命令watch nvidia-smi可以查看GPU的序号。\n",
    "- log_dir用于存放训练的log，如果不设置，每个GPU上的训练信息都会打印到屏幕。\n",
    "- 多GPU运行的脚本是：train_multi_gpu.py，包含上述修改过的train_multi_gpu()函数。\n",
    "\n",
    "训练完成后，程序会在指定的./mylog文件夹下产生四个worklog文件。每个文件存放对应设备的训练过程日志，其中worklog.0的内容如下："
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "collapsed": false
   },
   "source": [
    "```\n",
    "grep: warning: GREP_OPTIONS is deprecated; please use an alias or script\n",
    "dev_id 0\n",
    "I1104 06:25:04.377323 31961 nccl_context.cc:88] worker: 127.0.0.1:6171 is not ready, will retry after 3 seconds...\n",
    "I1104 06:25:07.377645 31961 nccl_context.cc:127] init nccl context nranks: 3 local rank: 0 gpu id: 1\n",
    "W1104 06:25:09.097079 31961 device_context.cc:235] Please NOTE: device: 1, CUDA Capability: 61, Driver API Version: 10.1, Runtime API Version: 9.0\n",
    "W1104 06:25:09.104460 31961 device_context.cc:243] device: 1, cuDNN Version: 7.5.\n",
    "start data reader (trainers_num: 3, trainer_id: 0)\n",
    "epoch: 0, batch_id: 10, loss is: [0.47507238]\n",
    "epoch: 0, batch_id: 20, loss is: [0.25089613]\n",
    "epoch: 0, batch_id: 30, loss is: [0.13120805]\n",
    "epoch: 0, batch_id: 40, loss is: [0.12122715]\n",
    "epoch: 0, batch_id: 50, loss is: [0.07328521]\n",
    "epoch: 0, batch_id: 60, loss is: [0.11860339]\n",
    "epoch: 0, batch_id: 70, loss is: [0.08205047]\n",
    "epoch: 0, batch_id: 80, loss is: [0.08192863]\n",
    "epoch: 0, batch_id: 90, loss is: [0.0736289]\n",
    "epoch: 0, batch_id: 100, loss is: [0.08607423]\n",
    "start data reader (trainers_num: 3, trainer_id: 0)\n",
    "epoch: 1, batch_id: 10, loss is: [0.07032011]\n",
    "epoch: 1, batch_id: 20, loss is: [0.09687119]\n",
    "epoch: 1, batch_id: 30, loss is: [0.0307216]\n",
    "epoch: 1, batch_id: 40, loss is: [0.03884467]\n",
    "epoch: 1, batch_id: 50, loss is: [0.02801813]\n",
    "epoch: 1, batch_id: 60, loss is: [0.05751991]\n",
    "epoch: 1, batch_id: 70, loss is: [0.03721186]\n",
    ".....\n",
    "```"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "PaddlePaddle 1.6.0 (Python 3.5)",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 1
}
