{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "7f4e4862",
   "metadata": {},
   "outputs": [],
   "source": [
    "zidian = {\n",
    "    '<PAD>': 0,\n",
    "    '1': 1,\n",
    "    '2': 2,\n",
    "    '3': 3,\n",
    "    '4': 4,\n",
    "    '5': 5,\n",
    "    '6': 6,\n",
    "    '7': 7,\n",
    "    '8': 8,\n",
    "    '9': 9,\n",
    "    '0': 10,\n",
    "    'Jan': 11,\n",
    "    'Feb': 12,\n",
    "    'Mar': 13,\n",
    "    'Apr': 14,\n",
    "    'May': 15,\n",
    "    'Jun': 16,\n",
    "    'Jul': 17,\n",
    "    'Aug': 18,\n",
    "    'Sep': 19,\n",
    "    'Oct': 20,\n",
    "    'Nov': 21,\n",
    "    'Dec': 22,\n",
    "    '-': 23,\n",
    "    '/': 24,\n",
    "    '<SOS>': 25,\n",
    "    '<EOS>': 26,\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "652b0528",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[10, 10, 23,  1,  2, 23,  2,  8],\n",
       "         [ 9,  3, 23,  1,  2, 23,  1,  7],\n",
       "         [ 2, 10, 23, 10,  3, 23,  1,  2],\n",
       "         [ 2, 10, 23,  1,  1, 23, 10,  1],\n",
       "         [10, 10, 23, 10,  5, 23,  1,  3]]),\n",
       " torch.Size([100, 8]),\n",
       " tensor([[25,  2,  8, 24, 22, 24,  2, 10, 10, 10, 26],\n",
       "         [25,  1,  7, 24, 22, 24,  1,  9,  9,  3, 26],\n",
       "         [25,  1,  2, 24, 13, 24,  2, 10,  2, 10, 26],\n",
       "         [25, 10,  1, 24, 21, 24,  2, 10,  2, 10, 26],\n",
       "         [25,  1,  3, 24, 15, 24,  2, 10, 10, 10, 26]]),\n",
       " torch.Size([100, 11]))"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import torch\n",
    "import torch.nn as nn\n",
    "import numpy as np\n",
    "import datetime\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "\n",
    "class DateDataset(Dataset):\n",
    "    \"\"\"Synthetic dataset of date-format translation pairs.\n",
    "\n",
    "    Each item is a pair of LongTensors:\n",
    "      - source: 'yy-mm-dd' encoded char-by-char with `zidian` (length 8)\n",
    "      - target: '<SOS>dd/Mon/yyyy<EOS>' encoded with `zidian` (length 11)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        pass\n",
    "\n",
    "    def __len__(self):\n",
    "        # Nominal size only; items are generated on the fly.\n",
    "        return 2000\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        # Draw a random UNIX timestamp and convert it to a date.\n",
    "        # NOTE(review): `index` is ignored, so every access returns a fresh\n",
    "        # random sample and the dataset is not reproducible per index.\n",
    "        # fromtimestamp also depends on the local timezone.\n",
    "        date = np.random.randint(143835585, 2043835585)\n",
    "        date = datetime.datetime.fromtimestamp(date)\n",
    "\n",
    "        # Render the same date in the two formats, e.g.\n",
    "        #   05-06-15     (source)\n",
    "        #   15/Jun/2005  (target)\n",
    "        date_cn = date.strftime(\"%y-%m-%d\")\n",
    "        date_en = date.strftime(\"%d/%b/%Y\")\n",
    "\n",
    "        # Source string: every character is one vocabulary token.\n",
    "        date_cn_code = [zidian[v] for v in date_cn]\n",
    "\n",
    "        # Target string: wrap with <SOS>/<EOS>; the 3-letter month\n",
    "        # abbreviation (chars 3:6 of 'dd/Mon/yyyy') is a single token.\n",
    "        date_en_code = []\n",
    "        date_en_code += [zidian['<SOS>']]\n",
    "        date_en_code += [zidian[v] for v in date_en[:3]]\n",
    "        date_en_code += [zidian[date_en[3:6]]]\n",
    "        date_en_code += [zidian[v] for v in date_en[6:]]\n",
    "        date_en_code += [zidian['<EOS>']]\n",
    "\n",
    "        return torch.LongTensor(date_cn_code), torch.LongTensor(date_en_code)\n",
    "\n",
    "\n",
    "dataloader = DataLoader(dataset=DateDataset(),\n",
    "                        batch_size=100,\n",
    "                        shuffle=True,\n",
    "                        drop_last=True)\n",
    "\n",
    "# Grab one batch to sanity-check the tensor shapes.\n",
    "for i, data in enumerate(dataloader):\n",
    "    sample = data\n",
    "    break\n",
    "sample[0][:5], sample[0].shape, sample[1][:5], sample[1].shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "5b4cd93c",
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(tensor([[ 0.1030, -0.1141, -0.1131,  0.0795, -0.0905, -0.0374,  0.1219, -0.1612,\n",
       "           0.0579,  0.1664, -0.2122, -0.1027,  0.0780, -0.0654, -0.1115,  0.0588,\n",
       "           0.0916, -0.1834,  0.0258, -0.0365, -0.0360, -0.0095, -0.0952,  0.0344,\n",
       "           0.1635,  0.0589,  0.0351],\n",
       "         [ 0.0419, -0.1616, -0.0572,  0.0688, -0.1074,  0.0127,  0.0630, -0.1885,\n",
       "           0.0040,  0.1648, -0.1619, -0.0888,  0.1776, -0.0717, -0.1585,  0.1230,\n",
       "           0.0006, -0.1844,  0.0451, -0.0121,  0.0281, -0.0370, -0.0675,  0.0560,\n",
       "           0.0824,  0.0116,  0.0791]], grad_fn=<SliceBackward>),\n",
       " torch.Size([100, 10, 27]))"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "class Seq2Seq(nn.Module):\n",
    "    \"\"\"LSTM encoder-decoder translating 'yy-mm-dd' into 'dd/Mon/yyyy'.\n",
    "\n",
    "    The encoder consumes the source sequence; its final hidden/cell state\n",
    "    seeds an LSTMCell decoder that is trained with teacher forcing.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "\n",
    "        # Encoder: 27-token vocabulary embedded into 16-d vectors.\n",
    "        self.encoder_embed = nn.Embedding(num_embeddings=27, embedding_dim=16)\n",
    "\n",
    "        # Encoder LSTM: 16-d inputs, 32-d hidden state.\n",
    "        self.encoder = nn.LSTM(input_size=16,\n",
    "                               hidden_size=32,\n",
    "                               num_layers=1,\n",
    "                               batch_first=True)\n",
    "\n",
    "        # Decoder: same vocabulary, separate embedding table.\n",
    "        self.decoder_embed = nn.Embedding(num_embeddings=27, embedding_dim=16)\n",
    "\n",
    "        # The decoder runs one step at a time, so use a cell, not nn.LSTM.\n",
    "        self.decoder_cell = nn.LSTMCell(input_size=16, hidden_size=32)\n",
    "\n",
    "        # Project the 32-d hidden state to 27 vocabulary logits.\n",
    "        self.out_fc = nn.Linear(in_features=32, out_features=27)\n",
    "\n",
    "    def forward(self, x, y):\n",
    "        \"\"\"Teacher-forced forward pass.\n",
    "\n",
    "        x: [b, 8]  source token ids\n",
    "        y: [b, 11] target token ids, including <SOS> and <EOS>\n",
    "        returns: [b, 10, 27] logits for each target position after <SOS>\n",
    "        \"\"\"\n",
    "        # [b,8] -> [b,8,16]\n",
    "        x = self.encoder_embed(x)\n",
    "\n",
    "        # Run the encoder; keep only the final hidden/cell state.\n",
    "        # [b,8,16] -> [1,b,32],[1,b,32]\n",
    "        _, (h, c) = self.encoder(x, None)\n",
    "\n",
    "        # Drop only the num_layers dimension. (A bare .squeeze() would also\n",
    "        # remove the batch dimension when b == 1 and break the decoder.)\n",
    "        # [1,b,32] -> [b,32]\n",
    "        h = h.squeeze(0)\n",
    "        c = c.squeeze(0)\n",
    "\n",
    "        # Drop y's last token: each input token predicts the NEXT token,\n",
    "        # so <EOS> is never fed into the decoder.\n",
    "        # [b,11] -> [b,10]\n",
    "        y = y[:, :-1]\n",
    "\n",
    "        # [b,10] -> [b,10,16]\n",
    "        y = self.decoder_embed(y)\n",
    "\n",
    "        # Step the decoder over every target position, seeded with the\n",
    "        # encoder's final state; each step's state feeds the next step.\n",
    "        outs = []\n",
    "        for i in range(y.shape[1]):\n",
    "            # [b,16] -> [b,32],[b,32]\n",
    "            h, c = self.decoder_cell(y[:, i], (h, c))\n",
    "\n",
    "            # [b,32] -> [b,27]\n",
    "            outs.append(self.out_fc(h))\n",
    "\n",
    "        # Stack the per-step logits into one sequence.\n",
    "        # [10,b,27] -> [b,10,27]\n",
    "        outs = torch.stack(outs, dim=0).permute(1, 0, 2)\n",
    "\n",
    "        return outs\n",
    "\n",
    "\n",
    "model = Seq2Seq()\n",
    "\n",
    "out = model(sample[0], sample[1])\n",
    "out[0, :2], out.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "20f541cd",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0 1.8169605731964111\n",
      "10 0.3342939019203186\n",
      "20 0.09271496534347534\n",
      "30 0.013728859834372997\n",
      "40 0.005192959681153297\n",
      "50 0.0030120087321847677\n",
      "60 0.0020933125633746386\n",
      "70 0.0014203935861587524\n",
      "80 0.001078095636330545\n",
      "90 0.0008668229565955698\n",
      "100 0.0006723636179231107\n",
      "110 0.0005770153948105872\n",
      "120 0.0004773383552674204\n",
      "130 0.00037985510425642133\n",
      "140 0.00029425040702335536\n",
      "150 0.0002678190066944808\n",
      "160 0.0002279395266668871\n",
      "170 0.00021694882889278233\n",
      "180 0.00017270594253204763\n",
      "190 0.00015532056568190455\n"
     ]
    }
   ],
   "source": [
    "loss_func = nn.CrossEntropyLoss()\n",
    "optimizer = torch.optim.Adam(model.parameters(), lr=1e-2)\n",
    "\n",
    "model.train()\n",
    "for epoch in range(200):\n",
    "    for x, y in dataloader:\n",
    "        optimizer.zero_grad()\n",
    "\n",
    "        # Teacher-forced forward pass: [b,10,27] logits.\n",
    "        y_pred = model(x, y)\n",
    "\n",
    "        # Targets drop the leading <SOS>: position i is predicted from\n",
    "        # tokens 0..i of y.  [b,11] -> [b,10] -> [b*10]\n",
    "        target = y[:, 1:].reshape(-1)\n",
    "\n",
    "        # Flatten logits to [b*10,27] to match CrossEntropyLoss's API.\n",
    "        loss = loss_func(y_pred.reshape(-1, 27), target)\n",
    "        loss.backward()\n",
    "        optimizer.step()\n",
    "\n",
    "    # Report the last batch's loss every 10 epochs.\n",
    "    if epoch % 10 == 0:\n",
    "        print(epoch, loss.item())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "06874eb1",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "('00-12-28', '<SOS>28/Dec/2000<EOS>')"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Inverse vocabulary: id -> token, for decoding model output.\n",
    "reverse_zidian = {v: k for k, v in zidian.items()}\n",
    "\n",
    "\n",
    "def seq_to_str(seq):\n",
    "    \"\"\"Decode a 1-D LongTensor of token ids back into a string.\"\"\"\n",
    "    seq = seq.detach().numpy()\n",
    "    return ''.join([reverse_zidian[idx] for idx in seq])\n",
    "\n",
    "\n",
    "seq_to_str(sample[0][0]), seq_to_str(sample[1][0])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "af32f9b3",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "80-12-27 <SOS>27/Dec/1980<EOS> 27/Dec/1980\n",
      "30-04-05 <SOS>05/Apr/2030<EOS> 05/Apr/2030\n",
      "23-05-10 <SOS>10/May/2023<EOS> 10/May/2023\n",
      "87-01-18 <SOS>18/Jan/1987<EOS> 18/Jan/1987\n",
      "22-07-10 <SOS>10/Jul/2022<EOS> 10/Jul/2022\n",
      "92-05-24 <SOS>24/May/1992<EOS> 24/May/1992\n",
      "25-11-10 <SOS>10/Nov/2025<EOS> 10/Nov/2025\n",
      "08-01-28 <SOS>28/Jan/2008<EOS> 28/Jan/2008\n",
      "97-09-12 <SOS>12/Sep/1997<EOS> 12/Sep/1997\n",
      "93-09-25 <SOS>25/Sep/1993<EOS> 25/Sep/1993\n",
      "95-11-06 <SOS>06/Nov/1995<EOS> 06/Nov/1995\n",
      "23-09-29 <SOS>29/Sep/2023<EOS> 29/Sep/2023\n",
      "89-11-04 <SOS>04/Nov/1989<EOS> 04/Nov/1989\n",
      "01-08-05 <SOS>05/Aug/2001<EOS> 05/Aug/2001\n",
      "80-02-22 <SOS>22/Feb/1980<EOS> 22/Feb/1980\n",
      "98-01-28 <SOS>28/Jan/1998<EOS> 28/Jan/1998\n",
      "24-12-03 <SOS>03/Dec/2024<EOS> 03/Dec/2024\n",
      "95-05-19 <SOS>19/May/1995<EOS> 19/May/1995\n",
      "22-03-27 <SOS>27/Mar/2022<EOS> 27/Mar/2022\n",
      "05-10-07 <SOS>07/Oct/2005<EOS> 07/Oct/2005\n",
      "88-08-01 <SOS>01/Aug/1988<EOS> 01/Aug/1988\n",
      "27-08-28 <SOS>28/Aug/2027<EOS> 28/Aug/2027\n",
      "91-05-31 <SOS>31/May/1991<EOS> 31/May/1991\n",
      "75-11-07 <SOS>07/Nov/1975<EOS> 07/Nov/1975\n",
      "80-03-25 <SOS>25/Mar/1980<EOS> 25/Mar/1980\n",
      "20-05-02 <SOS>02/May/2020<EOS> 02/May/2020\n",
      "04-09-02 <SOS>02/Sep/2004<EOS> 02/Sep/2004\n",
      "88-01-27 <SOS>27/Jan/1988<EOS> 27/Jan/1988\n",
      "22-07-06 <SOS>06/Jul/2022<EOS> 06/Jul/2022\n",
      "97-09-19 <SOS>19/Sep/1997<EOS> 19/Sep/1997\n",
      "06-04-30 <SOS>30/Apr/2006<EOS> 30/Apr/2006\n",
      "10-03-27 <SOS>27/Mar/2010<EOS> 27/Mar/2010\n",
      "98-06-29 <SOS>29/Jun/1998<EOS> 29/Jun/1998\n",
      "03-10-13 <SOS>13/Oct/2003<EOS> 13/Oct/2003\n",
      "97-07-29 <SOS>29/Jul/1997<EOS> 29/Jul/1997\n",
      "27-05-16 <SOS>16/May/2027<EOS> 16/May/2027\n",
      "15-03-30 <SOS>30/Mar/2015<EOS> 30/Mar/2015\n",
      "98-09-16 <SOS>16/Sep/1998<EOS> 16/Sep/1998\n",
      "20-03-26 <SOS>26/Mar/2020<EOS> 26/Mar/2020\n",
      "32-02-22 <SOS>22/Feb/2032<EOS> 22/Feb/2032\n",
      "15-05-08 <SOS>08/May/2015<EOS> 08/May/2015\n",
      "26-05-13 <SOS>13/May/2026<EOS> 13/May/2026\n",
      "16-11-24 <SOS>24/Nov/2016<EOS> 24/Nov/2016\n",
      "30-07-16 <SOS>16/Jul/2030<EOS> 16/Jul/2030\n",
      "88-09-11 <SOS>11/Sep/1988<EOS> 11/Sep/1988\n",
      "24-09-30 <SOS>30/Sep/2024<EOS> 30/Sep/2024\n",
      "87-10-10 <SOS>10/Oct/1987<EOS> 10/Oct/1987\n",
      "98-10-13 <SOS>13/Oct/1998<EOS> 13/Oct/1998\n",
      "89-05-24 <SOS>24/May/1989<EOS> 24/May/1989\n",
      "32-12-24 <SOS>24/Dec/2032<EOS> 24/Dec/2032\n",
      "18-05-25 <SOS>25/May/2018<EOS> 25/May/2018\n",
      "27-10-21 <SOS>21/Oct/2027<EOS> 21/Oct/2027\n",
      "91-04-26 <SOS>26/Apr/1991<EOS> 26/Apr/1991\n",
      "20-02-14 <SOS>14/Feb/2020<EOS> 14/Feb/2020\n",
      "10-12-27 <SOS>27/Dec/2010<EOS> 27/Dec/2010\n",
      "78-08-27 <SOS>27/Aug/1978<EOS> 27/Aug/1978\n",
      "16-01-20 <SOS>20/Jan/2016<EOS> 20/Jan/2016\n",
      "79-10-21 <SOS>21/Oct/1979<EOS> 21/Oct/1979\n",
      "27-12-09 <SOS>09/Dec/2027<EOS> 09/Dec/2027\n",
      "28-03-14 <SOS>14/Mar/2028<EOS> 14/Mar/2028\n",
      "23-01-02 <SOS>02/Jan/2023<EOS> 02/Jan/2023\n",
      "30-09-11 <SOS>11/Sep/2030<EOS> 11/Sep/2030\n",
      "80-09-07 <SOS>07/Sep/1980<EOS> 07/Sep/1980\n",
      "26-09-02 <SOS>02/Sep/2026<EOS> 02/Sep/2026\n",
      "27-02-16 <SOS>16/Feb/2027<EOS> 16/Feb/2027\n",
      "31-10-21 <SOS>21/Oct/2031<EOS> 21/Oct/2031\n",
      "04-05-21 <SOS>21/May/2004<EOS> 21/May/2004\n",
      "18-12-10 <SOS>10/Dec/2018<EOS> 10/Dec/2018\n",
      "29-09-02 <SOS>02/Sep/2029<EOS> 02/Sep/2029\n",
      "23-05-25 <SOS>25/May/2023<EOS> 25/May/2023\n",
      "10-11-25 <SOS>25/Nov/2010<EOS> 25/Nov/2010\n",
      "23-07-30 <SOS>30/Jul/2023<EOS> 30/Jul/2023\n",
      "30-08-01 <SOS>01/Aug/2030<EOS> 01/Aug/2030\n",
      "00-06-12 <SOS>12/Jun/2000<EOS> 12/Jun/2000\n",
      "02-12-13 <SOS>13/Dec/2002<EOS> 13/Dec/2002\n",
      "74-10-17 <SOS>17/Oct/1974<EOS> 17/Oct/1974\n",
      "29-03-12 <SOS>12/Mar/2029<EOS> 12/Mar/2029\n",
      "83-06-26 <SOS>26/Jun/1983<EOS> 26/Jun/1983\n",
      "14-03-19 <SOS>19/Mar/2014<EOS> 19/Mar/2014\n",
      "80-09-22 <SOS>22/Sep/1980<EOS> 22/Sep/1980\n",
      "26-01-23 <SOS>23/Jan/2026<EOS> 23/Jan/2026\n",
      "20-03-06 <SOS>06/Mar/2020<EOS> 06/Mar/2020\n",
      "76-01-17 <SOS>17/Jan/1976<EOS> 17/Jan/1976\n",
      "78-10-17 <SOS>17/Oct/1978<EOS> 17/Oct/1978\n",
      "85-06-12 <SOS>12/Jun/1985<EOS> 12/Jun/1985\n",
      "11-02-15 <SOS>15/Feb/2011<EOS> 15/Feb/2011\n",
      "83-06-27 <SOS>27/Jun/1983<EOS> 27/Jun/1983\n",
      "80-06-09 <SOS>09/Jun/1980<EOS> 09/Jun/1980\n",
      "05-02-09 <SOS>09/Feb/2005<EOS> 09/Feb/2005\n",
      "20-12-23 <SOS>23/Dec/2020<EOS> 23/Dec/2020\n",
      "77-08-12 <SOS>12/Aug/1977<EOS> 12/Aug/1977\n",
      "78-11-17 <SOS>17/Nov/1978<EOS> 17/Nov/1978\n",
      "98-05-22 <SOS>22/May/1998<EOS> 22/May/1998\n",
      "08-11-06 <SOS>06/Nov/2008<EOS> 06/Nov/2008\n",
      "06-08-06 <SOS>06/Aug/2006<EOS> 06/Aug/2006\n",
      "06-02-06 <SOS>06/Feb/2006<EOS> 06/Feb/2006\n",
      "97-01-24 <SOS>24/Jan/1997<EOS> 24/Jan/1997\n",
      "29-09-06 <SOS>06/Sep/2029<EOS> 06/Sep/2029\n",
      "91-10-23 <SOS>23/Oct/1991<EOS> 23/Oct/1991\n",
      "95-07-26 <SOS>26/Jul/1995<EOS> 26/Jul/1995\n"
     ]
    }
   ],
   "source": [
    "def predict(x):\n",
    "    \"\"\"Greedy autoregressive decoding for a batch of source sequences.\n",
    "\n",
    "    x: [b, 8] source token ids ('yy-mm-dd').\n",
    "    Returns [b, 9] predicted token ids: the 9 content tokens of\n",
    "    'dd/Mon/yyyy' (the fixed <SOS>/<EOS> markers are not predicted).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "\n",
    "    # Inference only: no_grad avoids building the autograd graph.\n",
    "    with torch.no_grad():\n",
    "        # [b,8] -> [b,8,16]\n",
    "        x = model.encoder_embed(x)\n",
    "        # Encode and keep only the final hidden/cell state.\n",
    "        # [b,8,16] -> [1,b,32],[1,b,32]\n",
    "        _, (h, c) = model.encoder(x, None)\n",
    "\n",
    "        # Drop only the num_layers dim. (A bare .squeeze() would also drop\n",
    "        # the batch dim when b == 1 and break decoding.)\n",
    "        # [1,b,32] -> [b,32]\n",
    "        h = h.squeeze(0)\n",
    "        c = c.squeeze(0)\n",
    "\n",
    "        # Every target sequence starts with <SOS>, so seed decoding with it.\n",
    "        # [b]\n",
    "        out = torch.full((x.size(0), ), zidian['<SOS>'], dtype=torch.int64)\n",
    "        # [b] -> [b,16]\n",
    "        out = model.decoder_embed(out)\n",
    "\n",
    "        # Generate the 9 content tokens; <SOS>/<EOS> carry no information.\n",
    "        outs = []\n",
    "        for i in range(9):\n",
    "            # Feed the previous prediction, carrying the state forward;\n",
    "            # the first step starts from the encoder's final state.\n",
    "            # [b,16] -> [b,32],[b,32]\n",
    "            h, c = model.decoder_cell(out, (h, c))\n",
    "\n",
    "            # [b,32] -> [b,27] logits, then greedy argmax to a token id.\n",
    "            out = model.out_fc(h)\n",
    "            # [b,27] -> [b]\n",
    "            out = out.argmax(dim=1)\n",
    "            outs.append(out)\n",
    "\n",
    "            # Embed this step's prediction as the next step's input.\n",
    "            # [b] -> [b,16]\n",
    "            out = model.decoder_embed(out)\n",
    "\n",
    "        # Assemble the per-step predictions into sequences.\n",
    "        # [9,b] -> [b,9]\n",
    "        outs = torch.stack(outs, dim=0).permute(1, 0)\n",
    "\n",
    "    return outs\n",
    "\n",
    "\n",
    "# Decode one batch and print source, ground truth, and prediction.\n",
    "for i, data in enumerate(dataloader):\n",
    "    x, y = data\n",
    "    y_pred = predict(x)\n",
    "    for xi, yi, pi in zip(x, y, y_pred):\n",
    "        print(seq_to_str(xi), seq_to_str(yi), seq_to_str(pi))\n",
    "    break"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
