{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
     "import os\n",
     "import re\n",
     "# Silence the HuggingFace tokenizers fork warning and pin this notebook to GPU 2.\n",
     "os.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n",
     "os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n",
     "import sys\n",
     "# Make the project's ./utils and ./pytorch directories importable.\n",
     "# NOTE(review): sys.path[0] is '' inside a notebook, so these resolve\n",
     "# relative to the current working directory -- run from the repo root.\n",
     "sys.path.insert(1, os.path.join(sys.path[0], './utils'))\n",
     "sys.path.insert(1, os.path.join(sys.path[0], './pytorch'))\n",
     "import numpy as np\n",
     "import argparse\n",
     "import time\n",
     "import logging\n",
     "\n",
     "import torch\n",
     "import torch.nn as nn\n",
     "import torch.nn.functional as F\n",
     "import torch.optim as optim\n",
     "import torch.utils.data\n",
     " \n",
     "# Project-local helpers (resolved via the sys.path entries above).\n",
     "from utilities import (create_folder, get_filename, create_logging, Mixup, \n",
     "    StatisticsContainer, split_phrases, pad_or_truncate)\n",
     "from models import (Cnn14, Cnn14_no_specaug, Cnn14_no_dropout, \n",
     "    Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128, \n",
     "    Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19, \n",
     "    Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14, \n",
     "    Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128, \n",
     "    Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn14Bert)\n",
     "from pytorch_utils import (move_data_to_device, count_parameters, count_flops, \n",
     "    do_mixup)\n",
     "from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler, \n",
     "    AlternateTrainSampler, EvaluateSampler, collate_fn, AudioSetBiModalDataset, get_collate_fn)\n",
     "from evaluate import Evaluator, EvaluatorBiModal\n",
     "import config\n",
     "import librosa\n",
     "import pandas as pd\n",
     "from collections import defaultdict\n",
     "from tqdm import tqdm, trange\n",
     "import pickle\n",
     "import multiprocessing\n",
     "from sklearn.metrics.pairwise import cosine_similarity\n",
     "from rank_metrics import retrieval_as_classification"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at prajjwal1/bert-medium were not used when initializing BertModel: ['cls.predictions.transform.dense.weight', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.bias']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
     "# Instantiate the bi-modal audio (CNN14) + text (BERT-medium) model.\n",
     "# The audio front-end parameters must match those encoded in the checkpoint\n",
     "# path loaded in the next cell (32 kHz, 1024-pt window, hop 160, 64 mel bins).\n",
     "model = Cnn14Bert(sample_rate=32000, window_size=1024, \n",
     "        hop_size=160, mel_bins=64, fmin=50, fmax=8000, \n",
     "        bert_model_type=\"prajjwal1/bert-medium\", \n",
     "        max_seq_len=16, shared_dim=1024)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "resume_checkpoint_path = \"/backup/data/Audioset/zelin/workspaces/audioset_tagging/checkpoints_bimodal/main_bimodal/sample_rate=32000,window_size=1024,hop_size=160,mel_bins=64,fmin=50,fmax=8000/data_type=balanced_train/Cnn14Bert/bert_type=prajjwal1/bert-medium/balanced=balanced/max_text_nums=128/max_seq_len=16/batch_size=16/50000_iterations.pth\"\n",
     "# NOTE(review): torch.load unpickles arbitrary objects -- only load trusted checkpoints.\n",
     "checkpoint = torch.load(resume_checkpoint_path)\n",
     "# strict=False silently skips missing/unexpected keys; inspect the returned\n",
     "# IncompatibleKeys object if the loaded model ever behaves oddly.\n",
     "model.load_state_dict(checkpoint['model'], strict=False)\n",
     "model.cuda()\n",
     "model.eval()\n",
     "# Suppress the module repr that model.eval() would otherwise display.\n",
     "None"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "build index for AudioCaps (validation split)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>youtube_id</th>\n",
       "      <th>start_time</th>\n",
       "      <th>caption</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>audiocap_id</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>97151</th>\n",
       "      <td>vfY_TJq7n_U</td>\n",
       "      <td>130</td>\n",
       "      <td>Rustling occurs, ducks quack and water splashe...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>108945</th>\n",
       "      <td>tdWhHV3X25Q</td>\n",
       "      <td>60</td>\n",
       "      <td>An audience gives applause as a man yells and ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>107898</th>\n",
       "      <td>tw76HGONaKg</td>\n",
       "      <td>570</td>\n",
       "      <td>A man speaks over intermittent keyboard taps</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>107893</th>\n",
       "      <td>y2bVZ7rz-5M</td>\n",
       "      <td>280</td>\n",
       "      <td>Motor noise is followed by a horn honking and ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>107892</th>\n",
       "      <td>ti66RjZWTp0</td>\n",
       "      <td>20</td>\n",
       "      <td>A male speaks as metal clicks and a gun fires ...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "              youtube_id  start_time  \\\n",
       "audiocap_id                            \n",
       "97151        vfY_TJq7n_U         130   \n",
       "108945       tdWhHV3X25Q          60   \n",
       "107898       tw76HGONaKg         570   \n",
       "107893       y2bVZ7rz-5M         280   \n",
       "107892       ti66RjZWTp0          20   \n",
       "\n",
       "                                                       caption  \n",
       "audiocap_id                                                     \n",
       "97151        Rustling occurs, ducks quack and water splashe...  \n",
       "108945       An audience gives applause as a man yells and ...  \n",
       "107898            A man speaks over intermittent keyboard taps  \n",
       "107893       Motor noise is followed by a horn honking and ...  \n",
       "107892       A male speaks as metal clicks and a gun fires ...  "
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# AudioCaps validation split: one row per caption, indexed by audiocap_id,\n",
     "# with the source YouTube clip id and start time.\n",
     "val_df = pd.read_csv(\"/home/zhiling/py3_workspace/aser_audioset/audio_Caps/val.csv\", index_col=0)\n",
     "val_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "495"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "org_yid_stimes = set()\n",
    "for rid, row in val_df.iterrows():\n",
    "    org_yid_stimes.add((row['youtube_id'], str(row['start_time'])))\n",
    "len(org_yid_stimes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "495\n"
     ]
    }
   ],
   "source": [
     "# Map (youtube_id, start_time) -> on-disk wav file name. The youtube_id may\n",
     "# itself contain underscores, hence the join over all but the last two tokens.\n",
     "yid_time2fname = {}\n",
     "audio_dir = '/backup/data/audiocaps/audio/val/'\n",
     "for fname in os.listdir(audio_dir):\n",
     "    tmp = fname[:-4].split(\"_\")  # drop the \".wav\" extension, then split on '_'\n",
     "    stime = tmp[-2][:-4]  # NOTE(review): assumes a 4-char suffix (e.g. \".000\") after the start time -- confirm\n",
     "    yid = \"_\".join(tmp[:-2])\n",
     "    if (yid, stime) in org_yid_stimes:\n",
     "        yid_time2fname[(yid, stime)] = fname\n",
     "print(len(yid_time2fname))  # all matched"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "2872"
      ]
     },
     "execution_count": 7,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Build the retrieval index:\n",
     "#   cap_id2caption / cap_id2phrases / cap_id2audio_id  -- per-caption lookups\n",
     "#   phrase2cap_ids                                     -- inverted phrase index\n",
     "cap_id2caption = {}\n",
     "cap_id2phrases = {}\n",
     "cap_id2audio_id = {}\n",
     "# just use integer as audio_id\n",
     "audio_id2filename = sorted(os.listdir(audio_dir))\n",
     "filename2audio_id = {x:i for i, x in enumerate(audio_id2filename)}\n",
     "unique_phrases = set()  # phrase_id2phrase = list(unique_phrases)\n",
     "phrase2cap_ids = defaultdict(list)\n",
     "for i, (rid, row) in enumerate(val_df.iterrows()):\n",
     "    cap_id = rid\n",
     "    cap = row[\"caption\"]\n",
     "    yid, stime = row['youtube_id'], str(row['start_time'])\n",
     "    fname = yid_time2fname[(yid, stime)]\n",
     "    audio_id = filename2audio_id[fname]\n",
     "    phrases = split_phrases(cap)  # project helper: splits a caption into phrases\n",
     "    cap_id2phrases[cap_id] = phrases\n",
     "    cap_id2caption[cap_id] = cap\n",
     "    cap_id2audio_id[cap_id] = audio_id\n",
     "    for phrase in phrases:\n",
     "        unique_phrases.add(phrase)\n",
     "        phrase2cap_ids[phrase].append(cap_id)\n",
     "# just use integer as phrase_id\n",
     "# NOTE(review): set iteration order -- and hence phrase ids -- is not stable\n",
     "# across interpreter runs; phrase_embeddings below must be built in the same\n",
     "# session (or rebuilt together with this list).\n",
     "phrase_id2phrase = list(unique_phrases)\n",
     "len(phrase2cap_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "2475\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "2    1226\n",
       "1     761\n",
       "3     411\n",
       "4      68\n",
       "5       8\n",
       "6       1\n",
       "dtype: int64"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Distribution of phrase counts per caption (most captions yield 1-3 phrases).\n",
     "print(len(cap_id2caption))\n",
     "pd.Series([len(x) for x in cap_id2phrases.values()]).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1      2410\n",
       "2       229\n",
       "3        80\n",
       "4        40\n",
       "5        25\n",
       "6        21\n",
       "7        15\n",
       "8        11\n",
       "10        7\n",
       "11        6\n",
       "9         4\n",
       "13        3\n",
       "12        3\n",
       "25        2\n",
       "24        2\n",
       "17        2\n",
       "20        2\n",
       "16        1\n",
       "88        1\n",
       "30        1\n",
       "42        1\n",
       "72        1\n",
       "23        1\n",
       "97        1\n",
       "15        1\n",
       "19        1\n",
       "231       1\n",
       "dtype: int64"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Distribution of caption counts per phrase: most phrases occur in a single\n",
     "# caption, but a few generic ones appear in hundreds.\n",
     "pd.Series([len(x) for x in phrase2cap_ids.values()]).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 45/45 [00:00<00:00, 87.58it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(2872, 1024)"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "phrase_embeddings = []\n",
    "bs = 64\n",
    "for i in trange(0, len(phrase_id2phrase), bs):\n",
    "    phrases_batch = phrase_id2phrase[i:i+bs]\n",
    "    text_emb = model(texts=phrases_batch)['text_emb']\n",
    "    phrase_embeddings.append(text_emb.detach().cpu().numpy())\n",
    "phrase_embeddings = np.concatenate(phrase_embeddings, 0)\n",
    "phrase_embeddings.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "def load_wav(full_path):\n",
     "    \"\"\"Load one audio file at config.sample_rate (mono) and pad or\n",
     "    truncate it to exactly config.clip_samples samples.\"\"\"\n",
     "    (waveform, _) = librosa.core.load(full_path, sr=config.sample_rate, mono=True)\n",
     "    waveform = pad_or_truncate(waveform, config.clip_samples)\n",
     "    return waveform"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def multiprocess_load_wav(wav_dir, num_process=8):\n",
    "    pool = multiprocessing.Pool(num_process)\n",
    "    fnames = [wav_dir+fname for fname in sorted(os.listdir(wav_dir))]\n",
    "    waveforms = list(tqdm(pool.imap(load_wav, fnames), total=len(fnames)))\n",
    "    waveforms = np.stack(waveforms, 0)\n",
    "    return waveforms, sorted(os.listdir(wav_dir))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 495/495 [00:18<00:00, 26.25it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(495, 320000)"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Load all 495 validation clips (320000 samples each, i.e. 10 s at 32 kHz).\n",
     "audio_dir = \"/backup/data/audiocaps/audio/val/\"\n",
     "waveforms, fnames = multiprocess_load_wav(audio_dir, num_process=8)\n",
     "waveforms.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0, 1, 2, 3, 4]\n"
     ]
    }
   ],
   "source": [
     "# Reorder waveform rows so that row i corresponds to audio_id i.\n",
     "# NOTE(review): multiprocess_load_wav already returns files in sorted order,\n",
     "# so this reindex is the identity (see the printed [0, 1, 2, 3, 4]); kept as\n",
     "# a safety net in case the loader's ordering ever changes.\n",
     "filename2audio_id = {x:i for i, x in enumerate(audio_id2filename)}\n",
     "reindex = [filename2audio_id[fname] for fname in fnames]\n",
     "print(reindex[:5])\n",
     "waveforms = waveforms[reindex, :]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 62/62 [00:07<00:00,  8.42it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(495, 1024)"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_embeddings = []\n",
    "bs = 8\n",
    "for i in trange(0, len(waveforms), bs):\n",
    "    audio_batch = move_data_to_device(waveforms[i:i+bs], 'cuda')\n",
    "    audio_emb = model(waveform=audio_batch)['audio_emb']\n",
    "    audio_embeddings.append(audio_emb.detach().cpu().numpy())\n",
    "audio_embeddings = np.concatenate(audio_embeddings, 0)\n",
    "audio_embeddings.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Persist the full index so downstream notebooks can skip recomputation.\n",
     "# NOTE(review): unpickling this later is only safe because we control the\n",
     "# file's contents -- never pickle.load untrusted files.\n",
     "with open('results/phrase_audio_index_audiocaps.pkl', 'wb') as f:\n",
     "    pickle.dump([cap_id2caption, cap_id2phrases, cap_id2audio_id, audio_id2filename, phrase2cap_ids, phrase_id2phrase, yid_time2fname, phrase_embeddings, audio_embeddings], f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(495, 2872)"
      ]
     },
     "execution_count": 17,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Pairwise cosine similarity: rows = audio clips, columns = phrases.\n",
     "audio_phrase_sims = cosine_similarity(audio_embeddings, phrase_embeddings)\n",
     "audio_phrase_sims.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.04552408, 0.02628758, 0.08747004, 0.07961898, 0.062186  ],\n",
       "      dtype=float32)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_phrase_sims[:5, :].max(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Inverse mappings needed for evaluation:\n",
     "#   audio_id -> set of ground-truth caption ids; phrase -> phrase_id.\n",
     "audio_id2cap_ids = [set() for i in range(len(audio_id2filename))]\n",
     "for cid, aid in cap_id2audio_id.items():\n",
     "    audio_id2cap_ids[aid].add(cid)\n",
     "phrase2phrase_id = {x:i for i, x in enumerate(phrase_id2phrase)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "all_cap_ids = sorted(cap_id2phrases.keys())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "audio->caption"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 495/495 [00:34<00:00, 14.51it/s]\n"
     ]
    }
   ],
   "source": [
    "audio_caption_labels = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "audio_caption_sims_mean = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "audio_caption_sims_max = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "\n",
    "for aid, sims in enumerate(tqdm(audio_phrase_sims)):\n",
    "    pos_cap_ids = audio_id2cap_ids[aid]\n",
    "    for j, cap_id in enumerate(all_cap_ids):\n",
    "        phrases = cap_id2phrases[cap_id]\n",
    "        audio_caption_labels[aid, j] = int(cap_id in pos_cap_ids)\n",
    "        phrase_sims = []\n",
    "        for phrase in phrases:\n",
    "            phrase_id = phrase2phrase_id[phrase]\n",
    "            phrase_sims.append(sims[phrase_id])\n",
    "        audio_caption_sims_mean[aid, j] = np.mean(phrase_sims)\n",
    "        audio_caption_sims_max[aid, j] = np.max(phrase_sims)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(495, 2475)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_caption_sims_mean.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-0.23291151, -0.20355475, -0.19193192, -0.25316167, -0.13299946],\n",
       "       [-0.14962661, -0.24571335, -0.20929241, -0.15394932, -0.28756487],\n",
       "       [-0.18265878, -0.33738175, -0.23520075, -0.17205462, -0.24104358],\n",
       "       [-0.2469468 , -0.2221161 , -0.18562722, -0.23489875, -0.06275778],\n",
       "       [-0.17200831, -0.32005724, -0.18614209, -0.21109188, -0.18905993]])"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_caption_sims_mean[:5,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Results calculated for 2475 queries\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'R1': 7.313131313131313,\n",
       " 'R5': 24.363636363636363,\n",
       " 'R10': 37.61616161616162,\n",
       " 'R50': 71.47474747474747,\n",
       " 'MedR': 18.0,\n",
       " 'MeanR': 50.285656565656566,\n",
       " 'geometric_mean_R1-R5-R10': 18.854136750469134}"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Audio->caption retrieval scored with the mean-over-phrases similarity.\n",
     "rank_results = retrieval_as_classification(audio_caption_sims_mean, audio_caption_labels)\n",
     "rank_results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Results calculated for 2475 queries\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'R1': 6.0606060606060606,\n",
       " 'R5': 21.292929292929294,\n",
       " 'R10': 34.78787878787879,\n",
       " 'R50': 68.12121212121212,\n",
       " 'MedR': 22.0,\n",
       " 'MeanR': 56.28363636363636,\n",
       " 'geometric_mean_R1-R5-R10': 16.496550415804204}"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Same evaluation with the max-over-phrases similarity; slightly worse than\n",
     "# the mean aggregation in the previous run.\n",
     "rank_results = retrieval_as_classification(audio_caption_sims_max, audio_caption_labels)\n",
     "rank_results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 29,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.020202020202020204"
      ]
     },
     "execution_count": 29,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "50/2475"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "74d07dc07065639914d79fc95a011a49c7b91ce6bf0d82680fb06e81dd1e87b0"
  },
  "kernelspec": {
   "display_name": "Python 3.6.10 64-bit ('py36': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
