{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "GAN 练习 —— MNIST 手写数字"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from torch import nn\n",
    "from torch.autograd import Variable\n",
    "from torch import optim\n",
    "from torch.utils.data import DataLoader\n",
    "\n",
    "from torchvision.utils import save_image\n",
    "from torchvision import transforms\n",
    "from torchvision.datasets import MNIST"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 搭建网络"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 鉴别网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Discriminator(nn.Module):\n",
    "    \"\"\"MLP discriminator for 28x28 MNIST images.\n",
    "\n",
    "    Takes a flattened 784-dim image and outputs, via Sigmoid,\n",
    "    the probability that the image is real.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.dis = nn.Sequential(\n",
    "                # input image (1, 28, 28) -> flattened to 784\n",
    "            nn.Linear(784, 256),\n",
    "            nn.LeakyReLU(0.2),\n",
    "                # LeakyReLU tends to work better than plain ReLU here\n",
    "            nn.Linear(256, 256),\n",
    "            nn.LeakyReLU(0.2),\n",
    "            nn.Linear(256, 1),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x: (batch, 784) flattened images -> (batch, 1) real-probability\n",
    "        return self.dis(x)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 生成网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Generator(nn.Module):\n",
    "    def __init__(self, latent_size):\n",
    "        \"\"\"\n",
    "        Build the generator MLP.\n",
    "        :param latent_size: int, dimensionality of the latent (noise) space\n",
    "        \"\"\"\n",
    "        super().__init__()\n",
    "        self.gen = nn.Sequential(\n",
    "            nn.Linear(latent_size, 256),\n",
    "            nn.ReLU(True),\n",
    "            nn.Linear(256, 256),\n",
    "            nn.ReLU(True),\n",
    "            nn.Linear(256, 784),\n",
    "            nn.Tanh()\n",
    "                # Tanh squashes outputs to -1~1, matching the normalized real images\n",
    "        )\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x: (batch, latent_size) noise -> (batch, 784) fake image in [-1, 1]\n",
    "        return self.gen(x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 54,
   "metadata": {},
   "outputs": [],
   "source": [
    "# latent space is 100-dimensional\n",
    "gnet = Generator(100).cuda()\n",
    "dnet = Discriminator().cuda()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 优化器和损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "# binary classification -> binary cross-entropy loss\n",
    "criterion = nn.BCELoss().cuda()\n",
    "# optimizer for the generator\n",
    "goptimizer = optim.Adam(gnet.parameters(), lr=0.0003)\n",
    "# optimizer for the discriminator\n",
    "doptimizer = optim.Adam(dnet.parameters(), lr=0.0003)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 56,
   "metadata": {},
   "outputs": [],
   "source": [
    "# training hyperparameters\n",
    "latent_size = 100\n",
    "epoch_num = 100\n",
    "batch_size = 128"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 读入 MNIST 真实数据"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {},
   "outputs": [],
   "source": [
    "img_transform = transforms.Compose(\n",
    "    [\n",
    "        transforms.ToTensor(),\n",
    "        # ToTensor scales pixels to 0~1; Normalize then maps them to -1~1\n",
    "        transforms.Normalize(mean=(0.5, ), std=(0.5, ))\n",
    "    ]\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 58,
   "metadata": {},
   "outputs": [],
   "source": [
    "# load the MNIST training set (download=False assumes ./data already contains it)\n",
    "mnist = MNIST(root=\"./data\", train=True, download=False, transform=img_transform)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 59,
   "metadata": {},
   "outputs": [],
   "source": [
    "# pin_memory speeds up host-to-GPU transfers during training\n",
    "dataloader = DataLoader(mnist, batch_size=batch_size, shuffle=True, pin_memory=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 假图片生成\n",
    "由784维向量生成假图片"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "def to_img(x: Variable):\n",
    "    \"\"\"Map generator output from [-1, 1] back to [0, 1] image tensors.\n",
    "\n",
    "    :param x: tensor of flattened 784-dim images in [-1, 1]\n",
    "    :return: tensor reshaped to (N, 1, 28, 28) with values clamped to [0, 1]\n",
    "    \"\"\"\n",
    "    rescaled = (x + 1) * 0.5\n",
    "    return rescaled.clamp(0, 1).view(-1, 1, 28, 28)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 训练网络"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 60,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0/100] d_loss:0.141720 g_loss:3.595356 \n",
      "[0/100] d_loss:0.026982 g_loss:5.046426 \n",
      "[0/100] d_loss:0.097961 g_loss:5.066807 \n",
      "[0/100] d_loss:0.010385 g_loss:6.452531 \n",
      "[1/100] d_loss:0.068160 g_loss:5.100369 \n",
      "[1/100] d_loss:0.228969 g_loss:5.194922 \n",
      "[1/100] d_loss:1.162702 g_loss:5.063040 \n",
      "[1/100] d_loss:0.344551 g_loss:5.306179 \n",
      "[2/100] d_loss:1.000365 g_loss:3.082730 \n",
      "[2/100] d_loss:0.142555 g_loss:4.442248 \n",
      "[2/100] d_loss:0.460511 g_loss:4.454363 \n",
      "[2/100] d_loss:0.308523 g_loss:5.499413 \n",
      "[3/100] d_loss:0.134059 g_loss:5.204735 \n",
      "[3/100] d_loss:0.265808 g_loss:3.624547 \n",
      "[3/100] d_loss:0.458793 g_loss:4.108181 \n",
      "[3/100] d_loss:0.560358 g_loss:2.840292 \n",
      "[4/100] d_loss:2.567976 g_loss:1.665214 \n",
      "[4/100] d_loss:0.646418 g_loss:3.755733 \n",
      "[4/100] d_loss:0.934487 g_loss:3.494239 \n",
      "[4/100] d_loss:1.384415 g_loss:2.368919 \n",
      "[5/100] d_loss:0.606113 g_loss:7.262081 \n",
      "[5/100] d_loss:0.722731 g_loss:6.647498 \n",
      "[5/100] d_loss:0.720963 g_loss:3.956428 \n",
      "[5/100] d_loss:0.607163 g_loss:3.774949 \n",
      "[6/100] d_loss:1.285271 g_loss:1.968428 \n",
      "[6/100] d_loss:0.692070 g_loss:3.605167 \n",
      "[6/100] d_loss:0.526362 g_loss:2.871313 \n",
      "[6/100] d_loss:0.324681 g_loss:2.713993 \n",
      "[7/100] d_loss:0.551477 g_loss:1.950626 \n",
      "[7/100] d_loss:0.885541 g_loss:2.128503 \n",
      "[7/100] d_loss:0.658084 g_loss:2.829324 \n",
      "[7/100] d_loss:1.014328 g_loss:2.217641 \n",
      "[8/100] d_loss:0.479365 g_loss:2.589123 \n",
      "[8/100] d_loss:0.676977 g_loss:2.838387 \n",
      "[8/100] d_loss:0.710876 g_loss:4.732093 \n",
      "[8/100] d_loss:0.802947 g_loss:3.436901 \n",
      "[9/100] d_loss:0.447484 g_loss:3.569353 \n",
      "[9/100] d_loss:0.240820 g_loss:4.066207 \n",
      "[9/100] d_loss:0.377346 g_loss:3.306737 \n",
      "[9/100] d_loss:0.628826 g_loss:3.723948 \n",
      "[10/100] d_loss:0.583668 g_loss:3.767777 \n",
      "[10/100] d_loss:0.280146 g_loss:3.832457 \n",
      "[10/100] d_loss:0.187434 g_loss:3.520504 \n",
      "[10/100] d_loss:0.683418 g_loss:1.982761 \n",
      "[11/100] d_loss:0.419102 g_loss:2.615163 \n",
      "[11/100] d_loss:0.457033 g_loss:3.503839 \n",
      "[11/100] d_loss:0.330241 g_loss:4.249246 \n",
      "[11/100] d_loss:0.292820 g_loss:3.962206 \n",
      "[12/100] d_loss:0.300362 g_loss:3.491476 \n",
      "[12/100] d_loss:0.386424 g_loss:5.651058 \n",
      "[12/100] d_loss:0.320787 g_loss:4.713462 \n",
      "[12/100] d_loss:0.713973 g_loss:3.219374 \n",
      "[13/100] d_loss:0.510544 g_loss:1.881368 \n",
      "[13/100] d_loss:0.331419 g_loss:2.701660 \n",
      "[13/100] d_loss:2.037575 g_loss:3.183645 \n",
      "[13/100] d_loss:0.672764 g_loss:3.233799 \n",
      "[14/100] d_loss:0.411345 g_loss:3.565291 \n",
      "[14/100] d_loss:0.450265 g_loss:3.497959 \n",
      "[14/100] d_loss:0.585907 g_loss:2.571957 \n",
      "[14/100] d_loss:0.510325 g_loss:2.084713 \n",
      "[15/100] d_loss:1.180392 g_loss:1.412094 \n",
      "[15/100] d_loss:0.726236 g_loss:2.870777 \n",
      "[15/100] d_loss:0.939762 g_loss:2.095949 \n",
      "[15/100] d_loss:1.192655 g_loss:2.729375 \n",
      "[16/100] d_loss:0.491612 g_loss:2.168955 \n",
      "[16/100] d_loss:0.434968 g_loss:3.438758 \n",
      "[16/100] d_loss:0.305746 g_loss:3.526472 \n",
      "[16/100] d_loss:0.480338 g_loss:3.369416 \n",
      "[17/100] d_loss:0.512490 g_loss:3.244763 \n",
      "[17/100] d_loss:0.397152 g_loss:2.485002 \n",
      "[17/100] d_loss:0.471712 g_loss:2.371462 \n",
      "[17/100] d_loss:0.371674 g_loss:3.375565 \n",
      "[18/100] d_loss:0.617092 g_loss:2.487804 \n",
      "[18/100] d_loss:0.929529 g_loss:2.523170 \n",
      "[18/100] d_loss:1.129449 g_loss:3.103806 \n",
      "[18/100] d_loss:0.768478 g_loss:3.340383 \n",
      "[19/100] d_loss:0.376526 g_loss:5.881747 \n",
      "[19/100] d_loss:0.211217 g_loss:4.304198 \n",
      "[19/100] d_loss:0.334689 g_loss:5.768003 \n",
      "[19/100] d_loss:0.270309 g_loss:4.660202 \n",
      "[20/100] d_loss:0.406102 g_loss:3.651333 \n",
      "[20/100] d_loss:0.445546 g_loss:4.722566 \n",
      "[20/100] d_loss:0.411545 g_loss:2.687984 \n",
      "[20/100] d_loss:0.489833 g_loss:3.415195 \n",
      "[21/100] d_loss:0.456058 g_loss:2.997221 \n",
      "[21/100] d_loss:0.254463 g_loss:3.764703 \n",
      "[21/100] d_loss:0.307882 g_loss:3.803940 \n",
      "[21/100] d_loss:0.262113 g_loss:4.462633 \n",
      "[22/100] d_loss:0.278759 g_loss:4.906270 \n",
      "[22/100] d_loss:0.249105 g_loss:4.171684 \n",
      "[22/100] d_loss:0.148643 g_loss:3.131519 \n",
      "[22/100] d_loss:0.403741 g_loss:3.747325 \n",
      "[23/100] d_loss:0.350488 g_loss:3.630187 \n",
      "[23/100] d_loss:0.414279 g_loss:4.665396 \n",
      "[23/100] d_loss:0.252905 g_loss:3.732935 \n",
      "[23/100] d_loss:0.307316 g_loss:4.456016 \n",
      "[24/100] d_loss:0.392207 g_loss:3.706067 \n",
      "[24/100] d_loss:0.200373 g_loss:3.533310 \n",
      "[24/100] d_loss:0.814604 g_loss:3.771586 \n",
      "[24/100] d_loss:0.470479 g_loss:2.224657 \n",
      "[25/100] d_loss:0.373030 g_loss:3.124804 \n",
      "[25/100] d_loss:0.492553 g_loss:2.836075 \n",
      "[25/100] d_loss:0.284204 g_loss:3.173567 \n",
      "[25/100] d_loss:0.799199 g_loss:2.599338 \n",
      "[26/100] d_loss:0.368429 g_loss:3.559313 \n",
      "[26/100] d_loss:0.918710 g_loss:2.813418 \n",
      "[26/100] d_loss:0.501889 g_loss:2.876106 \n",
      "[26/100] d_loss:0.338495 g_loss:3.668719 \n",
      "[27/100] d_loss:0.256835 g_loss:3.329873 \n",
      "[27/100] d_loss:0.367018 g_loss:3.461717 \n",
      "[27/100] d_loss:0.237603 g_loss:3.521915 \n",
      "[27/100] d_loss:0.503922 g_loss:3.591081 \n",
      "[28/100] d_loss:0.497751 g_loss:5.015053 \n",
      "[28/100] d_loss:0.619998 g_loss:3.481224 \n",
      "[28/100] d_loss:0.743533 g_loss:3.016027 \n",
      "[28/100] d_loss:0.408994 g_loss:4.213766 \n",
      "[29/100] d_loss:0.501582 g_loss:4.022658 \n",
      "[29/100] d_loss:0.413535 g_loss:3.225266 \n",
      "[29/100] d_loss:0.395872 g_loss:3.658255 \n",
      "[29/100] d_loss:0.299506 g_loss:4.272304 \n",
      "[30/100] d_loss:0.459707 g_loss:3.206203 \n",
      "[30/100] d_loss:0.629414 g_loss:4.062567 \n",
      "[30/100] d_loss:0.662581 g_loss:3.425371 \n",
      "[30/100] d_loss:0.270972 g_loss:2.694955 \n",
      "[31/100] d_loss:0.482704 g_loss:3.984205 \n",
      "[31/100] d_loss:0.692273 g_loss:2.975600 \n",
      "[31/100] d_loss:0.406854 g_loss:3.651404 \n",
      "[31/100] d_loss:0.389427 g_loss:3.388155 \n",
      "[32/100] d_loss:0.467754 g_loss:2.793312 \n",
      "[32/100] d_loss:0.553325 g_loss:3.506294 \n"
     ]
    },
    {
     "ename": "KeyboardInterrupt",
     "evalue": "",
     "output_type": "error",
     "traceback": [
      "\u001b[1;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m                         Traceback (most recent call last)",
      "\u001b[1;32m<ipython-input-60-a3c7bf59594e>\u001b[0m in \u001b[0;36m<module>\u001b[1;34m\u001b[0m\n\u001b[0;32m     44\u001b[0m         \u001b[0mgoptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mzero_grad\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     45\u001b[0m         \u001b[0mgloss\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mbackward\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m---> 46\u001b[1;33m         \u001b[0mgoptimizer\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mstep\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m     47\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m     48\u001b[0m         \u001b[1;32mif\u001b[0m \u001b[1;33m(\u001b[0m\u001b[0mi\u001b[0m\u001b[1;33m+\u001b[0m\u001b[1;36m1\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m%\u001b[0m \u001b[1;36m100\u001b[0m \u001b[1;33m==\u001b[0m \u001b[1;36m0\u001b[0m\u001b[1;33m:\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;32mC:\\Anaconda3\\envs\\torch\\lib\\site-packages\\torch\\optim\\adam.py\u001b[0m in \u001b[0;36mstep\u001b[1;34m(self, closure)\u001b[0m\n\u001b[0;32m    105\u001b[0m                 \u001b[0mstep_size\u001b[0m \u001b[1;33m=\u001b[0m \u001b[0mgroup\u001b[0m\u001b[1;33m[\u001b[0m\u001b[1;34m'lr'\u001b[0m\u001b[1;33m]\u001b[0m \u001b[1;33m*\u001b[0m \u001b[0mmath\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0msqrt\u001b[0m\u001b[1;33m(\u001b[0m\u001b[0mbias_correction2\u001b[0m\u001b[1;33m)\u001b[0m \u001b[1;33m/\u001b[0m \u001b[0mbias_correction1\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    106\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[1;32m--> 107\u001b[1;33m                 \u001b[0mp\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0mdata\u001b[0m\u001b[1;33m.\u001b[0m\u001b[0maddcdiv_\u001b[0m\u001b[1;33m(\u001b[0m\u001b[1;33m-\u001b[0m\u001b[0mstep_size\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mexp_avg\u001b[0m\u001b[1;33m,\u001b[0m \u001b[0mdenom\u001b[0m\u001b[1;33m)\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0m\u001b[0;32m    108\u001b[0m \u001b[1;33m\u001b[0m\u001b[0m\n\u001b[0;32m    109\u001b[0m         \u001b[1;32mreturn\u001b[0m \u001b[0mloss\u001b[0m\u001b[1;33m\u001b[0m\u001b[1;33m\u001b[0m\u001b[0m\n",
      "\u001b[1;31mKeyboardInterrupt\u001b[0m: "
     ]
    }
   ],
   "source": [
    "# fixed noise: reused each epoch so saved samples show generator progress\n",
    "fixed_noise = torch.randn(batch_size, latent_size).cuda()\n",
    "\n",
    "for epoch in range(epoch_num):\n",
    "    for i, ( img, _ ) in enumerate(dataloader):\n",
    "        num_img = img.size(0)\n",
    "        \n",
    "        # ==== train the discriminator ====\n",
    "            # real data: flatten each image to a 784-dim vector\n",
    "        img = img.view(num_img, -1)\n",
    "        real_img = Variable(img).cuda()\n",
    "        real_label = Variable(torch.ones(num_img)).cuda().unsqueeze(1)\n",
    "            # discriminator prediction on real data\n",
    "        real_out = dnet(real_img)\n",
    "            # discriminator loss on real data\n",
    "        dloss_real = criterion(real_out, real_label)\n",
    "        real_scores = real_out\n",
    "        \n",
    "            # random vectors from the latent space\n",
    "        noises = Variable(torch.randn(num_img, latent_size)).cuda()\n",
    "            # generate fake images\n",
    "        fake_img = gnet(noises)\n",
    "        fake_label = Variable(torch.zeros(num_img)).cuda().unsqueeze(1)\n",
    "            # detach: the discriminator update must not backprop into the generator\n",
    "        fake_out = dnet(fake_img.detach())\n",
    "            # discriminator loss on fake data\n",
    "        dloss_fake = criterion(fake_out, fake_label)\n",
    "        fake_scores = fake_out\n",
    "        \n",
    "            # total loss, backprop, update discriminator weights\n",
    "        dloss = dloss_real + dloss_fake\n",
    "        doptimizer.zero_grad()\n",
    "        dloss.backward()\n",
    "        doptimizer.step()\n",
    "        \n",
    "        # ==== train the generator ====\n",
    "            # fresh noise -> fake images (no detach here: gradients must reach gnet)\n",
    "        noises = Variable(torch.randn(num_img, latent_size)).cuda()\n",
    "        fake_img = gnet(noises)\n",
    "        \n",
    "        out = dnet(fake_img)\n",
    "            # generator wants the discriminator to label its fakes as real\n",
    "        gloss = criterion(out, real_label)\n",
    "        \n",
    "        goptimizer.zero_grad()\n",
    "        gloss.backward()\n",
    "        goptimizer.step()\n",
    "        \n",
    "        if (i+1) % 100 == 0:\n",
    "            print(f\"[{epoch}/{epoch_num}] d_loss:{dloss.data:.6f} g_loss:{gloss.data:.6f} \")\n",
    "            # d_real:{real_scores.data.mean():.6f} d_fake:{fake_scores.data.mean():.6f}\n",
    "    if epoch == 0:\n",
    "        real_imgs = to_img(real_img.cpu().data)\n",
    "        save_image(real_imgs, \"./data/img/real_imgs.png\")\n",
    "        \n",
    "    # take .data (detach) and move to CPU before saving the sample grid\n",
    "    fake_imgs = to_img(gnet(fixed_noise).data.cpu())\n",
    "    save_image(fake_imgs, f\"./data/fixedImg/fakeImgs_epoch{epoch+1}.png\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.7"
  },
  "latex_envs": {
   "LaTeX_envs_menu_present": true,
   "autoclose": false,
   "autocomplete": true,
   "bibliofile": "biblio.bib",
   "cite_by": "apalike",
   "current_citInitial": 1,
   "eqLabelWithNumbers": true,
   "eqNumInitial": 1,
   "hotkeys": {
    "equation": "Ctrl-E",
    "itemize": "Ctrl-I"
   },
   "labels_anchors": false,
   "latex_user_defs": false,
   "report_style_numbering": false,
   "user_envs_cfg": false
  },
  "toc": {
   "base_numbering": 1,
   "nav_menu": {},
   "number_sections": true,
   "sideBar": true,
   "skip_h1_title": false,
   "title_cell": "Table of Contents",
   "title_sidebar": "Contents",
   "toc_cell": false,
   "toc_position": {},
   "toc_section_display": true,
   "toc_window_display": false
  },
  "varInspector": {
   "cols": {
    "lenName": 16,
    "lenType": 16,
    "lenVar": 40
   },
   "kernels_config": {
    "python": {
     "delete_cmd_postfix": "",
     "delete_cmd_prefix": "del ",
     "library": "var_list.py",
     "varRefreshCmd": "print(var_dic_list())"
    },
    "r": {
     "delete_cmd_postfix": ") ",
     "delete_cmd_prefix": "rm(",
     "library": "var_list.r",
     "varRefreshCmd": "cat(var_dic_list()) "
    }
   },
   "types_to_exclude": [
    "module",
    "function",
    "builtin_function_or_method",
    "instance",
    "_Feature"
   ],
   "window_display": false
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
