{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "os.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n",
    "import sys\n",
    "sys.path.insert(1, os.path.join(sys.path[0], './utils'))\n",
    "sys.path.insert(1, os.path.join(sys.path[0], './pytorch'))\n",
    "import numpy as np\n",
    "import argparse\n",
    "import time\n",
    "import logging\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torch.utils.data\n",
    " \n",
    "from utilities import (create_folder, get_filename, create_logging, Mixup, \n",
    "    StatisticsContainer, split_phrases, pad_or_truncate)\n",
    "from models import (Cnn14, Cnn14_no_specaug, Cnn14_no_dropout, \n",
    "    Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128, \n",
    "    Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19, \n",
    "    Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14, \n",
    "    Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128, \n",
    "    Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn14Bert)\n",
    "from pytorch_utils import (move_data_to_device, count_parameters, count_flops, \n",
    "    do_mixup)\n",
    "from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler, \n",
    "    AlternateTrainSampler, EvaluateSampler, collate_fn, AudioSetBiModalDataset, get_collate_fn)\n",
    "from evaluate import Evaluator, EvaluatorBiModal\n",
    "import config\n",
    "import librosa\n",
    "import pandas as pd\n",
    "from collections import defaultdict\n",
    "from tqdm import tqdm, trange\n",
    "import pickle\n",
    "import multiprocessing\n",
    "from sklearn.metrics.pairwise import cosine_similarity\n",
    "from rank_metrics import retrieval_as_classification"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at prajjwal1/bert-medium were not used when initializing BertModel: ['cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.bias', 'cls.predictions.transform.dense.bias', 'cls.predictions.decoder.bias', 'cls.seq_relationship.weight', 'cls.predictions.bias', 'cls.predictions.transform.dense.weight']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "model = Cnn14Bert(sample_rate=32000, window_size=1024, \n",
    "        hop_size=160, mel_bins=64, fmin=50, fmax=8000, \n",
    "        bert_model_type=\"prajjwal1/bert-medium\", \n",
    "        max_seq_len=16, shared_dim=1024)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "resume_checkpoint_path = \"/backup/data/Audioset/zelin/workspaces/audioset_tagging/checkpoints_bimodal/main_bimodal/sample_rate=32000,window_size=1024,hop_size=160,mel_bins=64,fmin=50,fmax=8000/data_type=balanced_train/Cnn14Bert/bert_type=prajjwal1/bert-medium/balanced=balanced/max_text_nums=128/max_seq_len=16/batch_size=16/50000_iterations.pth\"\n",
    "checkpoint = torch.load(resume_checkpoint_path)\n",
    "model.load_state_dict(checkpoint['model'], strict=False)\n",
    "model.cuda()\n",
    "model.eval()\n",
    "None"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "build index for Clotho"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>youtube_id</th>\n",
       "      <th>start_time</th>\n",
       "      <th>caption</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>audiocap_id</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>103549</th>\n",
       "      <td>7fmOlUlwoNg</td>\n",
       "      <td>20</td>\n",
       "      <td>constant rattle noise and sharp vibration</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>103548</th>\n",
       "      <td>6BJ455B1aAs</td>\n",
       "      <td>0</td>\n",
       "      <td>a rocket fly by follow by a loud explosion and...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>103541</th>\n",
       "      <td>GOD8Bt5LfDE</td>\n",
       "      <td>100</td>\n",
       "      <td>hum and vibrate with a man and child speak and...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>103540</th>\n",
       "      <td>YQSuFyFm3Lc</td>\n",
       "      <td>230</td>\n",
       "      <td>a train run on a railroad track follow by a ve...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>103542</th>\n",
       "      <td>VjSEIRnLAh8</td>\n",
       "      <td>30</td>\n",
       "      <td>food be fry , and a woman talk</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "              youtube_id  start_time  \\\n",
       "audiocap_id                            \n",
       "103549       7fmOlUlwoNg          20   \n",
       "103548       6BJ455B1aAs           0   \n",
       "103541       GOD8Bt5LfDE         100   \n",
       "103540       YQSuFyFm3Lc         230   \n",
       "103542       VjSEIRnLAh8          30   \n",
       "\n",
       "                                                       caption  \n",
       "audiocap_id                                                     \n",
       "103549               constant rattle noise and sharp vibration  \n",
       "103548       a rocket fly by follow by a loud explosion and...  \n",
       "103541       hum and vibrate with a man and child speak and...  \n",
       "103540       a train run on a railroad track follow by a ve...  \n",
       "103542                          food be fry , and a woman talk  "
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "val_df = pd.read_csv(\"/home/zhiling/py3_workspace/aser_audioset/audio_Caps/test_lemma.csv\", index_col=0)\n",
    "val_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "975"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "org_yid_stimes = set()\n",
    "for rid, row in val_df.iterrows():\n",
    "    org_yid_stimes.add((row['youtube_id'], str(row['start_time'])))\n",
    "len(org_yid_stimes)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "968\n"
     ]
    }
   ],
   "source": [
    "yid_time2fname = {}\n",
    "audio_dir = '/backup/data/audiocaps/audio/test/'\n",
    "for fname in os.listdir(audio_dir):\n",
    "    tmp = fname[:-4].split(\"_\")\n",
    "    stime = tmp[-2][:-4]\n",
    "    yid = \"_\".join(tmp[:-2])\n",
    "    if (yid, stime) in org_yid_stimes:\n",
    "        yid_time2fname[(yid, stime)] = fname\n",
    "print(len(yid_time2fname))  # 975 -> 968"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "5622"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cap_id2caption = {}\n",
    "cap_id2phrases = {}\n",
    "cap_id2audio_id = {}\n",
    "# just use integer as audio_id\n",
    "audio_id2filename = sorted(os.listdir(audio_dir))\n",
    "filename2audio_id = {x:i for i, x in enumerate(audio_id2filename)}\n",
    "unique_phrases = set()  # phrase_id2phrase = list(unique_phrases)\n",
    "phrase2cap_ids = defaultdict(list)\n",
    "for i, (rid, row) in enumerate(val_df.iterrows()):\n",
    "    cap_id = rid\n",
    "    cap = row[\"caption\"]\n",
    "    yid, stime = row['youtube_id'], str(row['start_time'])\n",
    "    if (yid, stime) not in yid_time2fname:\n",
    "        continue\n",
    "    fname = yid_time2fname[(yid, stime)]\n",
    "    audio_id = filename2audio_id[fname]\n",
    "    phrases = split_phrases(cap)\n",
    "    cap_id2phrases[cap_id] = phrases\n",
    "    cap_id2caption[cap_id] = cap\n",
    "    cap_id2audio_id[cap_id] = audio_id\n",
    "    for phrase in phrases:\n",
    "        unique_phrases.add(phrase)\n",
    "        phrase2cap_ids[phrase].append(cap_id)\n",
    "# just use integer as phrase_id\n",
    "phrase_id2phrase = list(unique_phrases)\n",
    "len(phrase2cap_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "4840\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "2    1991\n",
       "1    1185\n",
       "3    1123\n",
       "4     399\n",
       "5     113\n",
       "6      24\n",
       "7       4\n",
       "8       1\n",
       "dtype: int64"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# number of phrases per caption\n",
    "print(len(cap_id2caption))\n",
    "pd.Series([len(x) for x in cap_id2phrases.values()]).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1      4550\n",
       "2       498\n",
       "3       198\n",
       "4        81\n",
       "5        56\n",
       "6        37\n",
       "7        28\n",
       "8        24\n",
       "10       18\n",
       "9        16\n",
       "12       13\n",
       "11       12\n",
       "13       11\n",
       "14        9\n",
       "15        9\n",
       "16        5\n",
       "17        4\n",
       "36        4\n",
       "18        4\n",
       "33        3\n",
       "20        3\n",
       "24        3\n",
       "22        3\n",
       "23        3\n",
       "21        3\n",
       "39        2\n",
       "19        2\n",
       "38        2\n",
       "26        2\n",
       "80        1\n",
       "104       1\n",
       "52        1\n",
       "48        1\n",
       "32        1\n",
       "71        1\n",
       "25        1\n",
       "29        1\n",
       "41        1\n",
       "45        1\n",
       "51        1\n",
       "30        1\n",
       "54        1\n",
       "62        1\n",
       "122       1\n",
       "242       1\n",
       "306       1\n",
       "27        1\n",
       "49        1\n",
       "dtype: int64"
      ]
     },
     "execution_count": 10,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# number of captions per phrase\n",
    "pd.Series([len(x) for x in phrase2cap_ids.values()]).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 88/88 [00:01<00:00, 85.58it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(5622, 1024)"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "phrase_embeddings = []\n",
    "bs = 64\n",
    "for i in trange(0, len(phrase_id2phrase), bs):\n",
    "    phrases_batch = phrase_id2phrase[i:i+bs]\n",
    "    text_emb = model(texts=phrases_batch)['text_emb']\n",
    "    phrase_embeddings.append(text_emb.detach().cpu().numpy())\n",
    "phrase_embeddings = np.concatenate(phrase_embeddings, 0)\n",
    "phrase_embeddings.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_wav(full_path):\n",
    "    (waveform, _) = librosa.core.load(full_path, sr=config.sample_rate, mono=True)\n",
    "    waveform = pad_or_truncate(waveform, config.clip_samples)\n",
    "    return waveform"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def multiprocess_load_wav(wav_dir, num_process=8):\n",
    "    pool = multiprocessing.Pool(num_process)\n",
    "    fnames = [wav_dir+fname for fname in sorted(os.listdir(wav_dir))]\n",
    "    waveforms = list(tqdm(pool.imap(load_wav, fnames), total=len(fnames)))\n",
    "    waveforms = np.stack(waveforms, 0)\n",
    "    return waveforms, sorted(os.listdir(wav_dir))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 968/968 [00:36<00:00, 26.58it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(968, 320000)"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "waveforms, fnames = multiprocess_load_wav(audio_dir, num_process=8)\n",
    "waveforms.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[0, 1, 2, 3, 4]\n"
     ]
    }
   ],
   "source": [
    "filename2audio_id = {x:i for i, x in enumerate(audio_id2filename)}\n",
    "reindex = [filename2audio_id[fname] for fname in fnames]\n",
    "print(reindex[:5])\n",
    "waveforms = waveforms[reindex, :]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 121/121 [00:14<00:00,  8.34it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(968, 1024)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_embeddings = []\n",
    "bs = 8\n",
    "for i in trange(0, len(waveforms), bs):\n",
    "    audio_batch = move_data_to_device(waveforms[i:i+bs], 'cuda')\n",
    "    audio_emb = model(waveform=audio_batch)['audio_emb']\n",
    "    audio_embeddings.append(audio_emb.detach().cpu().numpy())\n",
    "audio_embeddings = np.concatenate(audio_embeddings, 0)\n",
    "audio_embeddings.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the full index so retrieval can be re-run without the model.\n",
    "os.makedirs('results', exist_ok=True)  # avoid FileNotFoundError on first run\n",
    "with open('results/phrase_audio_index_audiocaps_test.pkl', 'wb') as f:\n",
    "    pickle.dump([cap_id2caption, cap_id2phrases, cap_id2audio_id, audio_id2filename, phrase2cap_ids, phrase_id2phrase, yid_time2fname, phrase_embeddings, audio_embeddings], f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(968, 5622)"
      ]
     },
     "execution_count": 18,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_phrase_sims = cosine_similarity(audio_embeddings, phrase_embeddings)\n",
    "audio_phrase_sims.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.0854277 , 0.0066642 , 0.07702723, 0.07343467, 0.00946152],\n",
       "      dtype=float32)"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_phrase_sims[:5, :].max(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [],
   "source": [
    "audio_id2cap_ids = [set() for i in range(len(audio_id2filename))]\n",
    "for cid, aid in cap_id2audio_id.items():\n",
    "    audio_id2cap_ids[aid].add(cid)\n",
    "phrase2phrase_id = {x:i for i, x in enumerate(phrase_id2phrase)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "all_cap_ids = sorted(cap_id2phrases.keys())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "audio->caption"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 968/968 [02:11<00:00,  7.34it/s]\n"
     ]
    }
   ],
   "source": [
    "audio_caption_labels = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "audio_caption_sims_mean = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "audio_caption_sims_max = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "\n",
    "for aid, sims in enumerate(tqdm(audio_phrase_sims)):\n",
    "    pos_cap_ids = audio_id2cap_ids[aid]\n",
    "    for j, cap_id in enumerate(all_cap_ids):\n",
    "        phrases = cap_id2phrases[cap_id]\n",
    "        audio_caption_labels[aid, j] = int(cap_id in pos_cap_ids)\n",
    "        phrase_sims = []\n",
    "        for phrase in phrases:\n",
    "            phrase_id = phrase2phrase_id[phrase]\n",
    "            phrase_sims.append(sims[phrase_id])\n",
    "        audio_caption_sims_mean[aid, j] = np.mean(phrase_sims)\n",
    "        audio_caption_sims_max[aid, j] = np.max(phrase_sims)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 23,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(968, 4840)"
      ]
     },
     "execution_count": 23,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_caption_sims_mean.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-0.12795952, -0.14531417, -0.27522081, -0.11061811, -0.26146248],\n",
       "       [-0.25216997, -0.24834245, -0.30694944, -0.23019558, -0.21495421],\n",
       "       [-0.30410916, -0.3212232 , -0.01860259, -0.35263726, -0.34731412],\n",
       "       [-0.29330236, -0.19283956, -0.30541313, -0.12095603, -0.19720593],\n",
       "       [-0.31142363, -0.25410903, -0.28437185, -0.32346606, -0.16168159]])"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_caption_sims_mean[:5,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Results calculated for 4840 queries\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'R1': 5.371900826446281,\n",
       " 'R5': 17.56198347107438,\n",
       " 'R10': 27.74793388429752,\n",
       " 'R50': 58.264462809917354,\n",
       " 'MedR': 35.0,\n",
       " 'MeanR': 92.52169421487604,\n",
       " 'geometric_mean_R1-R5-R10': 13.781952025172675}"
      ]
     },
     "execution_count": 25,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "rank_results = retrieval_as_classification(audio_caption_sims_mean, audio_caption_labels)\n",
    "rank_results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Results calculated for 4840 queries\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'R1': 3.739669421487603,\n",
       " 'R5': 13.90495867768595,\n",
       " 'R10': 23.078512396694215,\n",
       " 'R50': 52.1900826446281,\n",
       " 'MedR': 46.0,\n",
       " 'MeanR': 108.36952479338844,\n",
       " 'geometric_mean_R1-R5-R10': 10.626826149512622}"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "rank_results = retrieval_as_classification(audio_caption_sims_max, audio_caption_labels)\n",
    "rank_results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1.0330578512396693"
      ]
     },
     "execution_count": 27,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "100 * 50/4840"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "74d07dc07065639914d79fc95a011a49c7b91ce6bf0d82680fb06e81dd1e87b0"
  },
  "kernelspec": {
   "display_name": "Python 3.6.10 64-bit ('py36': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
