{
 "cells": [
  {
   "cell_type": "markdown",
   "source": [
    "请注意，只调用my_tensor.to(device)并没有复制张量到GPU上，而是返回了一个copy。所以你需要把它赋值给一个新的张量并在GPU上使用这个张量。\n",
    "在多GPU上执行前向和反向传播是自然而然的事。 但是PyTorch默认将只使用一个GPU。\n",
    "使用DataParallel可以轻易的让模型并行运行在多个GPU上。"
   ],
   "metadata": {}
  },
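  {
   "cell_type": "markdown",
   "source": [
    "As a quick illustration of that copy semantics (a minimal sketch; `my_tensor` here is just a stand-in tensor, not part of the tutorial's model):"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "import torch\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "my_tensor = torch.randn(2, 3)      # created on the CPU\n",
    "gpu_tensor = my_tensor.to(device)  # a copy on `device`; my_tensor itself is unchanged\n",
    "print(my_tensor.device, gpu_tensor.device)"
   ],
   "outputs": [],
   "metadata": {}
  },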
  {
   "cell_type": "code",
   "execution_count": 1,
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "input_size = 5\n",
    "output_size = 2\n",
    "\n",
    "batch_size = 30\n",
    "data_size = 100\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "制作一个虚拟（随机）数据集， 你只需实现 __getitem__"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "source": [
    "class RandomDataset(Dataset):\n",
    "    def __init__(self, size, length):\n",
    "        self.len = length\n",
    "        self.data = torch.randn(length,  size)\n",
    "    \n",
    "    def __getitem__(self, index):\n",
    "        return self.data[index]\n",
    "    \n",
    "    def __len__(self):\n",
    "        return self.len\n",
    "\n",
    "rand_loader = DataLoader(dataset=RandomDataset(input_size,data_size),\n",
    "                                                            batch_size=batch_size,shuffle=True)\n"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "class Model(nn.Module):\n",
    "    def __init__(self,input_size,output_size):\n",
    "        super(Model,self).__init__()\n",
    "        self.fc = nn.Linear(input_size, output_size)\n",
    "    \n",
    "    def forward(self,input):\n",
    "        output = self.fc(input)\n",
    "        print(\"\\tIn Model: input size\", input.size(),\"output_size \",output.size())\n",
    "        return output"
   ],
   "outputs": [],
   "metadata": {}
  },
  {
   "cell_type": "markdown",
   "source": [
    "创建一个模型和数据并行¶\n",
    "这是本教程的核心部分。\n",
    "\n",
    "首先，我们需要创建一个模型实例和检测我们是否有多个GPU。 如果有多个GPU，使用nn.DataParallel来包装我们的模型。 然后通过model.to(device)把模型放到GPU上。"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "model = Model(input_size, output_size)\n",
    "if torch.cuda.device_count() > 1:\n",
    "    print(\"Let's use\", torch.cuda.device_count(), \"GPUs!\")\n",
    "    # dim = 0 [30, xxx] -> [10, ...], [10, ...], [10, ...] on 3 GPUs\n",
    "    model = nn.DataParallel(model)\n",
    "\n",
    "model.to(device)\n",
    "\n",
    "for data in rand_loader:\n",
    "    input = data.to(device)\n",
    "    output = model(input)\n",
    "    print(\"Outside: input size\", input.size(),\n",
    "          \"output_size\", output.size())"
   ],
   "outputs": [],
   "metadata": {}
  },
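  {
   "cell_type": "markdown",
   "source": [
    "For intuition about the `dim = 0` comment above: `DataParallel` scatters each input batch along dimension 0 across the available GPUs. A minimal sketch of that splitting using `torch.chunk` (an illustration only, not `DataParallel`'s actual internals):"
   ],
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "source": [
    "import torch\n",
    "\n",
    "batch = torch.randn(30, 5)\n",
    "# Splitting a batch of 30 along dim 0 for 3 hypothetical GPUs -> three chunks of 10\n",
    "chunks = torch.chunk(batch, 3, dim=0)\n",
    "print([c.size() for c in chunks])"
   ],
   "outputs": [],
   "metadata": {}
  },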
  {
   "cell_type": "markdown",
   "source": [
    "![](2021-10-16-10-07-00.png)"
   ],
   "metadata": {}
  }
 ],
 "metadata": {
  "orig_nbformat": 4,
  "language_info": {
   "name": "python",
   "version": "3.8.10",
   "mimetype": "text/x-python",
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "pygments_lexer": "ipython3",
   "nbconvert_exporter": "python",
   "file_extension": ".py"
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3.8.10 64-bit ('d2l-zh': conda)"
  },
  "interpreter": {
   "hash": "0bedf3452ed48e38a2b75ff9f66ce8ae9ae2e92e8665cb618ee0797d2f46b9e5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}