{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "os.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '1'\n",
    "import sys\n",
    "sys.path.insert(1, os.path.join(sys.path[0], './utils'))\n",
    "sys.path.insert(1, os.path.join(sys.path[0], './pytorch'))\n",
    "import numpy as np\n",
    "import argparse\n",
    "import time\n",
    "import logging\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torch.utils.data\n",
    " \n",
    "from utilities import (create_folder, get_filename, create_logging, Mixup, \n",
    "    StatisticsContainer, split_phrases, pad_or_truncate)\n",
    "from models import (Cnn14, Cnn14_no_specaug, Cnn14_no_dropout, \n",
    "    Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128, \n",
    "    Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19, \n",
    "    Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14, \n",
    "    Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128, \n",
    "    Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn14Bert)\n",
    "from pytorch_utils import (move_data_to_device, count_parameters, count_flops, \n",
    "    do_mixup)\n",
    "from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler, \n",
    "    AlternateTrainSampler, EvaluateSampler, collate_fn, AudioSetBiModalDataset, get_collate_fn)\n",
    "from evaluate import Evaluator, EvaluatorBiModal\n",
    "import config\n",
    "import librosa\n",
    "import pandas as pd\n",
    "from collections import defaultdict\n",
    "from tqdm import tqdm, trange\n",
    "import pickle\n",
    "import multiprocessing\n",
    "from sklearn.metrics.pairwise import cosine_similarity\n",
    "from rank_metrics import retrieval_as_classification"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at prajjwal1/bert-medium were not used when initializing BertModel: ['cls.predictions.transform.LayerNorm.weight', 'cls.seq_relationship.weight', 'cls.seq_relationship.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.dense.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.transform.dense.weight', 'cls.predictions.bias', 'cls.predictions.decoder.bias']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "# Instantiate the bi-modal model: CNN14 audio encoder + BERT text encoder,\n",
    "# projected into a shared 1024-dim embedding space.\n",
    "# Frontend settings (32 kHz, 1024-pt window, hop 160, 64 mels, 50-8000 Hz)\n",
    "# match the checkpoint path loaded in the next cell.\n",
    "model = Cnn14Bert(sample_rate=32000, window_size=1024, \n",
    "        hop_size=160, mel_bins=64, fmin=50, fmax=8000, \n",
    "        bert_model_type=\"prajjwal1/bert-medium\", \n",
    "        max_seq_len=16, shared_dim=1024)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "<All keys matched successfully>"
      ]
     },
     "execution_count": 3,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "resume_checkpoint_path = \"/backup/data/Audioset/zelin/workspaces/audioset_tagging/checkpoints_bimodal/main_bimodal/sample_rate=32000,window_size=1024,hop_size=160,mel_bins=64,fmin=50,fmax=8000/data_type=balanced_train/Cnn14Bert/bert_type=prajjwal1/bert-medium/balanced=balanced/max_text_nums=128/max_seq_len=16/batch_size=16/50000_iterations.pth\"\n",
    "# map_location='cpu' avoids restoring tensors straight onto whichever GPU the\n",
    "# checkpoint was saved from; model.cuda() below moves everything explicitly.\n",
    "checkpoint = torch.load(resume_checkpoint_path, map_location='cpu')\n",
    "# strict=False tolerates extra/missing keys; the cell output above shows that\n",
    "# in fact all keys matched.\n",
    "model.load_state_dict(checkpoint['model'], strict=False)\n",
    "model.cuda()\n",
    "model.eval()\n",
    "None  # suppress the long repr of model.eval() in the cell output"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Build the phrase/audio retrieval index for the Clotho evaluation set"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>caption_1</th>\n",
       "      <th>caption_2</th>\n",
       "      <th>caption_3</th>\n",
       "      <th>caption_4</th>\n",
       "      <th>caption_5</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>file_name</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>Santa Motor.wav</th>\n",
       "      <td>A machine whines and squeals while rhythmicall...</td>\n",
       "      <td>A person is using electric clippers to trim bu...</td>\n",
       "      <td>Someone is trimming the bushes with electric c...</td>\n",
       "      <td>The whirring of a pump fills a bladder that tu...</td>\n",
       "      <td>While rhythmically punching or stamping, a mac...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Radio Garble.wav</th>\n",
       "      <td>A radio dispatcher and an officer are communic...</td>\n",
       "      <td>Communication with a walkie-talkie with a lot ...</td>\n",
       "      <td>A discussion with a walkie-talkie with a consi...</td>\n",
       "      <td>People talking through a walkie-talkie with ba...</td>\n",
       "      <td>The walkie-talkie the people are talking throu...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Radio Fuzz for Old Radio Broadcast FF233.wav</th>\n",
       "      <td>A radio tuner has been positioned in between r...</td>\n",
       "      <td>A transistor radio is being played on a statio...</td>\n",
       "      <td>A transistor radio is on a station that is not...</td>\n",
       "      <td>Radio static makes a constant hum with a high ...</td>\n",
       "      <td>Static coming from a radio that is in between ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>toy rattle 2.wav</th>\n",
       "      <td>A person winding up a device and then jingling...</td>\n",
       "      <td>A socket wrench that is tightening a bolt.</td>\n",
       "      <td>An object is tightened and then metallic objec...</td>\n",
       "      <td>Before keys are jangled on their chain, someon...</td>\n",
       "      <td>Someone is spinning around a lock with a dial.</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>Blade Big.wav</th>\n",
       "      <td>A person is pulling silverware out of the dish...</td>\n",
       "      <td>A person removes a knife from its holder then ...</td>\n",
       "      <td>A person taking a knife out of its holder and ...</td>\n",
       "      <td>Metal sliding together such as swords or knives.</td>\n",
       "      <td>The metallic clang of swords and knives striki...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "                                                                                      caption_1  \\\n",
       "file_name                                                                                         \n",
       "Santa Motor.wav                               A machine whines and squeals while rhythmicall...   \n",
       "Radio Garble.wav                              A radio dispatcher and an officer are communic...   \n",
       "Radio Fuzz for Old Radio Broadcast FF233.wav  A radio tuner has been positioned in between r...   \n",
       "toy rattle 2.wav                              A person winding up a device and then jingling...   \n",
       "Blade Big.wav                                 A person is pulling silverware out of the dish...   \n",
       "\n",
       "                                                                                      caption_2  \\\n",
       "file_name                                                                                         \n",
       "Santa Motor.wav                               A person is using electric clippers to trim bu...   \n",
       "Radio Garble.wav                              Communication with a walkie-talkie with a lot ...   \n",
       "Radio Fuzz for Old Radio Broadcast FF233.wav  A transistor radio is being played on a statio...   \n",
       "toy rattle 2.wav                                     A socket wrench that is tightening a bolt.   \n",
       "Blade Big.wav                                 A person removes a knife from its holder then ...   \n",
       "\n",
       "                                                                                      caption_3  \\\n",
       "file_name                                                                                         \n",
       "Santa Motor.wav                               Someone is trimming the bushes with electric c...   \n",
       "Radio Garble.wav                              A discussion with a walkie-talkie with a consi...   \n",
       "Radio Fuzz for Old Radio Broadcast FF233.wav  A transistor radio is on a station that is not...   \n",
       "toy rattle 2.wav                              An object is tightened and then metallic objec...   \n",
       "Blade Big.wav                                 A person taking a knife out of its holder and ...   \n",
       "\n",
       "                                                                                      caption_4  \\\n",
       "file_name                                                                                         \n",
       "Santa Motor.wav                               The whirring of a pump fills a bladder that tu...   \n",
       "Radio Garble.wav                              People talking through a walkie-talkie with ba...   \n",
       "Radio Fuzz for Old Radio Broadcast FF233.wav  Radio static makes a constant hum with a high ...   \n",
       "toy rattle 2.wav                              Before keys are jangled on their chain, someon...   \n",
       "Blade Big.wav                                  Metal sliding together such as swords or knives.   \n",
       "\n",
       "                                                                                      caption_5  \n",
       "file_name                                                                                        \n",
       "Santa Motor.wav                               While rhythmically punching or stamping, a mac...  \n",
       "Radio Garble.wav                              The walkie-talkie the people are talking throu...  \n",
       "Radio Fuzz for Old Radio Broadcast FF233.wav  Static coming from a radio that is in between ...  \n",
       "toy rattle 2.wav                                 Someone is spinning around a lock with a dial.  \n",
       "Blade Big.wav                                 The metallic clang of swords and knives striki...  "
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Clotho eval captions: one row per audio file (index = file name),\n",
    "# five caption columns per row.\n",
    "val_df = pd.read_csv(\"/home/zhiling/py3_workspace/aser_audioset/clotho/eval.csv\", index_col=0)\n",
    "val_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "8150"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Build the caption/phrase lookup tables for the Clotho eval split.\n",
    "cap_id2caption = {}\n",
    "cap_id2phrases = {}\n",
    "cap_id2audio_id = {}\n",
    "# just use integer (CSV row position) as audio_id\n",
    "audio_id2filename = val_df.index.tolist()\n",
    "unique_phrases = set()\n",
    "phrase2cap_ids = defaultdict(list)\n",
    "for i, (rid, row) in enumerate(val_df.iterrows()):\n",
    "    for j, cap in enumerate(row):\n",
    "        cap_id = f\"a{i}_c{j}\"\n",
    "        phrases = split_phrases(cap)\n",
    "        cap_id2phrases[cap_id] = phrases\n",
    "        cap_id2caption[cap_id] = cap\n",
    "        cap_id2audio_id[cap_id] = i\n",
    "        for phrase in phrases:\n",
    "            unique_phrases.add(phrase)\n",
    "            # NOTE(review): a phrase repeated within one caption would append\n",
    "            # cap_id twice here -- confirm split_phrases de-duplicates.\n",
    "            phrase2cap_ids[phrase].append(cap_id)\n",
    "# just use integer as phrase_id; sorted() (instead of list(set)) makes the\n",
    "# phrase_id assignment deterministic across runs -- set iteration order varies\n",
    "# with PYTHONHASHSEED, which would make the pickled index irreproducible.\n",
    "phrase_id2phrase = sorted(unique_phrases)\n",
    "len(phrase2cap_ids)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "5225\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "2    2305\n",
       "1    1896\n",
       "3     826\n",
       "4     159\n",
       "5      33\n",
       "6       5\n",
       "7       1\n",
       "dtype: int64"
      ]
     },
     "execution_count": 14,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Sanity check: total captions (1045 audios x 5) and the distribution of\n",
    "# phrases-per-caption produced by split_phrases.\n",
    "print(len(cap_id2caption))\n",
    "pd.Series([len(x) for x in cap_id2phrases.values()]).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "1     7267\n",
       "2      663\n",
       "3       97\n",
       "4       37\n",
       "5       26\n",
       "7       12\n",
       "6       12\n",
       "8        7\n",
       "10       5\n",
       "9        5\n",
       "16       4\n",
       "14       3\n",
       "13       3\n",
       "17       2\n",
       "21       2\n",
       "11       1\n",
       "32       1\n",
       "12       1\n",
       "42       1\n",
       "35       1\n",
       "dtype: int64"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Distribution of how many captions each unique phrase occurs in\n",
    "# (the long tail: most phrases are specific to a single caption).\n",
    "pd.Series([len(x) for x in phrase2cap_ids.values()]).value_counts()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 128/128 [00:01<00:00, 73.69it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(8150, 1024)"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Embed every unique phrase with the text branch, in mini-batches.\n",
    "phrase_embeddings = []\n",
    "bs = 64\n",
    "# model.eval() alone does not disable autograd; no_grad() skips building the\n",
    "# graph during this inference-only pass, saving memory and time.\n",
    "with torch.no_grad():\n",
    "    for i in trange(0, len(phrase_id2phrase), bs):\n",
    "        phrases_batch = phrase_id2phrase[i:i+bs]\n",
    "        text_emb = model(texts=phrases_batch)['text_emb']\n",
    "        phrase_embeddings.append(text_emb.detach().cpu().numpy())\n",
    "phrase_embeddings = np.concatenate(phrase_embeddings, 0)\n",
    "phrase_embeddings.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 39,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_wav(full_path):\n",
    "    \"\"\"Load one audio file as mono at config.sample_rate and pad/truncate\n",
    "    it to a fixed length of config.clip_samples samples.\"\"\"\n",
    "    (waveform, _) = librosa.core.load(full_path, sr=config.sample_rate, mono=True)\n",
    "    waveform = pad_or_truncate(waveform, config.clip_samples)\n",
    "    return waveform"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 50,
   "metadata": {},
   "outputs": [],
   "source": [
    "def multiprocess_load_wav(wav_dir, num_process=8):\n",
    "    \"\"\"Load every wav file in wav_dir in parallel.\n",
    "\n",
    "    Returns (waveforms, fnames): a (num_files, clip_samples) array and the\n",
    "    list of file names in the same order as the rows.\n",
    "    \"\"\"\n",
    "    # List the directory exactly once: the previous version called os.listdir\n",
    "    # twice, so the returned names could in principle mismatch the row order.\n",
    "    fnames = os.listdir(wav_dir)\n",
    "    full_paths = [os.path.join(wav_dir, fname) for fname in fnames]\n",
    "    # Context manager terminates the worker pool (it previously leaked).\n",
    "    with multiprocessing.Pool(num_process) as pool:\n",
    "        waveforms = list(tqdm(pool.imap(load_wav, full_paths), total=len(full_paths)))\n",
    "    return np.stack(waveforms, 0), fnames"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 51,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1045/1045 [02:35<00:00,  6.73it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(1045, 320000)"
      ]
     },
     "execution_count": 51,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Load all 1045 Clotho eval clips; each is padded/truncated to 320000\n",
    "# samples (10 s at the 32 kHz sample rate).\n",
    "wav_dir = \"/home/zhiling/py3_workspace/aser_audioset/zelin_dev/clotho/eval/\"\n",
    "waveforms, fnames = multiprocess_load_wav(wav_dir, num_process=8)\n",
    "waveforms.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 52,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[755, 194, 797, 656, 393]\n"
     ]
    }
   ],
   "source": [
    "filename2audio_id = {x:i for i, x in enumerate(audio_id2filename)}\n",
    "# audio_id of each loaded file, in directory-listing order\n",
    "reindex = [filename2audio_id[fname] for fname in fnames]\n",
    "print(reindex[:5])\n",
    "# BUG FIX -- scatter, not gather: row aid must end up holding the waveform of\n",
    "# audio_id2filename[aid], i.e. new[reindex[k]] = old[k]. The previous\n",
    "# waveforms[reindex, :] gathered instead, leaving rows misaligned with\n",
    "# audio_id (and the downstream retrieval metrics at chance level).\n",
    "# np.argsort(reindex) is the inverse permutation of reindex.\n",
    "waveforms = waveforms[np.argsort(reindex), :]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 69,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 131/131 [00:16<00:00,  7.97it/s]\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "(1045, 1024)"
      ]
     },
     "execution_count": 69,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Embed every clip with the audio branch, in mini-batches.\n",
    "audio_embeddings = []\n",
    "bs = 8\n",
    "# Inference only: no_grad() prevents autograd-graph buildup (model.eval()\n",
    "# alone does not disable gradient tracking).\n",
    "with torch.no_grad():\n",
    "    for i in trange(0, len(waveforms), bs):\n",
    "        audio_batch = move_data_to_device(waveforms[i:i+bs], 'cuda')\n",
    "        audio_emb = model(waveform=audio_batch)['audio_emb']\n",
    "        audio_embeddings.append(audio_emb.detach().cpu().numpy())\n",
    "audio_embeddings = np.concatenate(audio_embeddings, 0)\n",
    "audio_embeddings.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 55,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Persist the whole index so later sessions can skip the embedding passes.\n",
    "with open('results/phrase_audio_index.pkl', 'wb') as f:\n",
    "    pickle.dump([cap_id2caption, cap_id2phrases, cap_id2audio_id, audio_id2filename, phrase2cap_ids, phrase_id2phrase, phrase_embeddings, audio_embeddings], f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1045, 8150)"
      ]
     },
     "execution_count": 70,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Pairwise cosine similarity: rows = audios (1045), columns = phrases (8150).\n",
    "audio_phrase_sims = cosine_similarity(audio_embeddings, phrase_embeddings)\n",
    "audio_phrase_sims.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 80,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([ 0.05234778,  0.00089582, -0.00273662,  0.04303572, -0.00654328],\n",
       "      dtype=float32)"
      ]
     },
     "execution_count": 80,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_phrase_sims[:5, :].max(1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 71,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Invert cap_id -> audio_id into audio_id -> {cap_ids} (five captions per\n",
    "# audio), and build the phrase -> phrase_id lookup used to index the\n",
    "# similarity rows below.\n",
    "audio_id2cap_ids = [set() for i in range(len(audio_id2filename))]\n",
    "for cid, aid in cap_id2audio_id.items():\n",
    "    audio_id2cap_ids[aid].add(cid)\n",
    "phrase2phrase_id = {x:i for i, x in enumerate(phrase_id2phrase)}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 72,
   "metadata": {},
   "outputs": [],
   "source": [
    "all_cap_ids = sorted(cap_id2phrases.keys())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Audio -> caption retrieval: score each caption by aggregating its phrase similarities"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 73,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 1045/1045 [03:17<00:00,  5.29it/s]\n"
     ]
    }
   ],
   "source": [
    "# Score every (audio, caption) pair by aggregating the caption's phrase\n",
    "# similarities (mean and max), and build the 0/1 relevance labels alongside.\n",
    "audio_caption_labels = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "audio_caption_sims_mean = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "audio_caption_sims_max = np.zeros((len(audio_id2cap_ids), len(all_cap_ids)))\n",
    "\n",
    "for aid, sims in enumerate(tqdm(audio_phrase_sims)):\n",
    "    pos_cap_ids = audio_id2cap_ids[aid]\n",
    "    for j, cap_id in enumerate(all_cap_ids):\n",
    "        phrases = cap_id2phrases[cap_id]\n",
    "        audio_caption_labels[aid, j] = int(cap_id in pos_cap_ids)\n",
    "        phrase_sims = [sims[phrase2phrase_id[phrase]] for phrase in phrases]\n",
    "        audio_caption_sims_mean[aid, j] = np.mean(phrase_sims)\n",
    "        # BUG FIX: this previously used np.mean, so the _max matrix was a\n",
    "        # silent duplicate of the _mean matrix.\n",
    "        audio_caption_sims_max[aid, j] = np.max(phrase_sims)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 77,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(1045, 5225)"
      ]
     },
     "execution_count": 77,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_caption_sims_mean.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([[-0.28699306, -0.30970177, -0.31332442, -0.19859627, -0.28699303],\n",
       "       [-0.22997598, -0.22550556, -0.22257912, -0.17793767, -0.22997598],\n",
       "       [-0.20930588, -0.15741803, -0.1799491 , -0.18942618, -0.20930588],\n",
       "       [-0.24430053, -0.21228805, -0.1715581 , -0.29021728, -0.24430053],\n",
       "       [-0.1801554 , -0.25939173, -0.26267448, -0.11993854, -0.1801554 ]])"
      ]
     },
     "execution_count": 78,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_caption_sims_mean[:5,:5]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 75,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Results calculated for 5225 queries\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "{'R1': 0.1339712918660287,\n",
       " 'R5': 0.6507177033492823,\n",
       " 'R10': 1.3205741626794258,\n",
       " 'R50': 5.645933014354067,\n",
       " 'MedR': 530.0,\n",
       " 'MeanR': 521.3481339712919,\n",
       " 'geometric_mean_R1-R5-R10': 0.48646961704570363}"
      ]
     },
     "execution_count": 75,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# Evaluate audio->caption retrieval, treating each caption as a class.\n",
    "# NOTE(review): the reported metrics look near chance level (MedR 530, with\n",
    "# 1045 audios) -- verify the waveform/audio_id alignment upstream before\n",
    "# trusting these numbers.\n",
    "rank_results = retrieval_as_classification(audio_caption_sims_mean, audio_caption_labels)\n",
    "rank_results"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "74d07dc07065639914d79fc95a011a49c7b91ce6bf0d82680fb06e81dd1e87b0"
  },
  "kernelspec": {
   "display_name": "Python 3.6.10 64-bit ('py36': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
