{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.9-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python_defaultSpec_1599361726821",
   "display_name": "Python 3.7.9 64-bit ('d2l': conda)"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 112,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "features: tensor([[ 1.6785,  1.5345],\n        [ 0.7748, -0.6925],\n        [ 0.6152,  0.8560]]) \nlabel: tensor([[2.3144],\n        [8.0933],\n        [2.5256]])\nfeatures.shape: torch.Size([15, 2])\nlabels.shape: torch.Size([15, 1])\n"
    }
   ],
   "source": [
     "from d2l import torch as d2l\n",
     "import torch\n",
     "\n",
     "# Ground-truth parameters of the linear model used to synthesize data\n",
     "true_w = torch.tensor([2, -3.4])\n",
     "true_b = 4.2\n",
     "\n",
     "# Generate 15 synthetic (feature, label) samples from the true model\n",
     "features, labels = d2l.synthetic_data(true_w, true_b, 15)\n",
     "print('features:', features[0:3],'\\nlabel:', labels[0:3]) # labels has only one column\n",
     "print('features.shape:',features.shape)\n",
     "print('labels.shape:',labels.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "text": "\u001b[1;31mSignature:\u001b[0m \u001b[0md2l\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mload_array\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata_arrays\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mis_train\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;31mSource:\u001b[0m   \n\u001b[1;32mdef\u001b[0m \u001b[0mload_array\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdata_arrays\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mis_train\u001b[0m\u001b[1;33m=\u001b[0m\u001b[1;32mTrue\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m  \u001b[1;31m#@save\u001b[0m\u001b[1;33m\n\u001b[0m    \u001b[1;34m\"\"\"Construct a PyTorch data iterator.\"\"\"\u001b[0m\u001b[1;33m\n\u001b[0m    \u001b[0mdataset\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensorDataset\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m*\u001b[0m\u001b[0mdata_arrays\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\n\u001b[0m    \u001b[1;32mreturn\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mDataLoader\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mdataset\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mbatch_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mshuffle\u001b[0m\u001b[1;33m=\u001b[0m\u001b[0mis_train\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;31mFile:\u001b[0m      c:\\anaconda3\\envs\\d2l\\lib\\site-packages\\d2l\\torch.py\n\u001b[1;31mType:\u001b[0m      function\n"
    }
   ],
   "source": [
     "# `??` is an IPython magic that displays the full source of d2l.load_array\n",
     "d2l.load_array??"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "params: ((tensor([1, 2]), tensor([3, 4])),)\nparams[0]: (tensor([1, 2]), tensor([3, 4]))\n--------------\nparams: (tensor([1, 2]), tensor([3, 4]))\nparams[0]: tensor([1, 2])\n"
    }
   ],
   "source": [
     "def printinfo(*params ):\n",
     "   # *params packs all positional arguments into a single tuple\n",
     "   print('params:',params)\n",
     "   print('params[0]:',params[0])\n",
     "\n",
     "a = torch.tensor([1,2])\n",
     "b = torch.tensor([3,4])\n",
     "tup2 = (a,b)\n",
     "printinfo(tup2)\n",
     "print('--------------')\n",
     "printinfo(*tup2)\n",
     "# The essential difference is here:\n",
     "# calling with * unpacks the tuple, cancelling out the tuple-packing done by *params"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 115,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "data_arrays: <class 'tuple'>\ndata_arrays[0] <class 'torch.Tensor'>\nfeatures.shape: torch.Size([15, 2])\n"
    }
   ],
   "source": [
     "data_arrays = (features, labels)\n",
     "print('data_arrays:',type(data_arrays))\n",
     "print('data_arrays[0]',type(data_arrays[0]))\n",
     "print('features.shape:',data_arrays[0].shape)\n",
     "# data_arrays itself is a tuple; data_arrays[0] is a tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 116,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "myTensorDataset tensors <class 'tuple'>\nmyTensorDataset tensors[0] <class 'tuple'>\ninner tensor type <class 'tuple'>\ninner tensor size 2\n"
    }
   ],
   "source": [
     "def myTensorDataset(*tensors):\n",
     "    # Mimics TensorDataset's *tensors signature to show how arguments are packed\n",
     "    print('myTensorDataset tensors',type(tensors))\n",
     "    print('myTensorDataset tensors[0]',type(tensors[0]))\n",
     "    for tensor in tensors:\n",
     "        print('inner tensor type', type(tensor))\n",
     "        print('inner tensor size', len(tensor))\n",
     "\n",
     "myTensorDataset(data_arrays)\n",
     "# Called as myTensorDataset(data_arrays) — no unpacking:\n",
     "# tensors itself is a tuple, and tensors[0] is also a tuple"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "myTensorDataset tensors <class 'tuple'>\nmyTensorDataset tensors[0] <class 'torch.Tensor'>\ninner tensor type <class 'torch.Tensor'>\ninner tensor size 15\ninner tensor type <class 'torch.Tensor'>\ninner tensor size 15\n"
    }
   ],
   "source": [
     "myTensorDataset(*data_arrays)\n",
     "# Called as myTensorDataset(*data_arrays) — with unpacking:\n",
     "# tensors itself is a tuple, and tensors[0] is a tensor"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 123,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "text": "\u001b[1;31mSignature:\u001b[0m \u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mTensorDataset\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0mtensors\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;31mDocstring:\u001b[0m Initialize self.  See help(type(self)) for accurate signature.\n\u001b[1;31mSource:\u001b[0m   \n    \u001b[1;32mdef\u001b[0m \u001b[0m__init__\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mself\u001b[0m\u001b[1;33m,\u001b[0m \u001b[1;33m*\u001b[0m\u001b[0mtensors\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\n\u001b[0m        \u001b[1;32massert\u001b[0m \u001b[0mall\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mtensors\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m]\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m==\u001b[0m \u001b[0mtensor\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msize\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;36m0\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;32mfor\u001b[0m \u001b[0mtensor\u001b[0m \u001b[1;32min\u001b[0m \u001b[0mtensors\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\n\u001b[0m        \u001b[0mself\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mtensors\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mtensors\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;31mFile:\u001b[0m      c:\\anaconda3\\envs\\d2l\\lib\\site-packages\\torch\\utils\\data\\dataset.py\n\u001b[1;31mType:\u001b[0m      function\n"
    }
   ],
   "source": [
     "from torch.utils import data\n",
     "\n",
     "# Show the source of TensorDataset.__init__: it takes *tensors, i.e. variadic positional arguments\n",
     "data.TensorDataset.__init__??"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 119,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "dataset: <class 'torch.utils.data.dataset.TensorDataset'>\ndataset.tensors: <class 'tuple'>\ndataset.tensors[0]: <class 'torch.Tensor'>\ndataset.tensors[0].shape: torch.Size([15, 2])\ndataset.tensors[1].shape: torch.Size([15, 1])\n"
    }
   ],
   "source": [
     "# Unpack (features, labels) with * so TensorDataset receives two tensor arguments\n",
     "dataset = data.TensorDataset(*data_arrays)\n",
     "print('dataset:',type(dataset))\n",
     "print('dataset.tensors:',type(dataset.tensors))\n",
     "print('dataset.tensors[0]:',type(dataset.tensors[0]))\n",
     "print('dataset.tensors[0].shape:',dataset.tensors[0].shape)\n",
     "print('dataset.tensors[1].shape:',dataset.tensors[1].shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 120,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "data_iter: <class 'torch.utils.data.dataloader.DataLoader'>\n"
    }
   ],
   "source": [
     "# shuffle: whether to reshuffle the data at every iteration; defaults to False.\n",
     "# Shuffling makes samples more independent, but should not be enabled when the data has sequential structure.\n",
     "batch_size = 10\n",
     "data_iter = data.DataLoader(dataset, batch_size, shuffle=True)\n",
     "print('data_iter:',type(data_iter))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 122,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": "tensor([[ 2.0877,  0.0274],\n        [ 1.6785,  1.5345],\n        [ 0.4479,  0.5513],\n        [-0.8003,  0.2902],\n        [ 0.6152,  0.8560],\n        [-0.9537, -1.8719],\n        [ 0.2549, -0.4816],\n        [ 0.1248, -0.1618],\n        [ 1.7702, -1.6724],\n        [ 0.4315,  0.7045]]) \n tensor([[ 8.2639],\n        [ 2.3144],\n        [ 3.2161],\n        [ 1.5898],\n        [ 2.5256],\n        [ 8.6439],\n        [ 6.3378],\n        [ 5.0021],\n        [13.4265],\n        [ 2.6745]])\ntensor([[-0.1191, -1.4662],\n        [ 0.9490, -1.8237],\n        [ 1.2000,  0.5734],\n        [ 0.7748, -0.6925],\n        [-0.5653, -1.0612]]) \n tensor([[ 8.9361],\n        [12.2882],\n        [ 4.6687],\n        [ 8.0933],\n        [ 6.6637]])\n"
    }
   ],
   "source": [
     "# There are 15 samples in total, not a multiple of batch_size;\n",
     "# the last, smaller batch is still yielded on the final iteration\n",
     "for X, y in data_iter:\n",
     "    print(X, '\\n', y)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}