{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import re\n",
    "os.environ['TOKENIZERS_PARALLELISM'] = \"false\"\n",
    "os.environ['CUDA_VISIBLE_DEVICES'] = '2'\n",
    "import sys\n",
    "sys.path.insert(1, os.path.join(sys.path[0], './utils'))\n",
    "sys.path.insert(1, os.path.join(sys.path[0], './pytorch'))\n",
    "import numpy as np\n",
    "import argparse\n",
    "import time\n",
    "import logging\n",
    "\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "import torch.utils.data\n",
    " \n",
    "from utilities import (create_folder, get_filename, create_logging, Mixup, \n",
    "    StatisticsContainer, split_phrases, pad_or_truncate)\n",
    "from models import (Cnn14, Cnn14_no_specaug, Cnn14_no_dropout, \n",
    "    Cnn6, Cnn10, ResNet22, ResNet38, ResNet54, Cnn14_emb512, Cnn14_emb128, \n",
    "    Cnn14_emb32, MobileNetV1, MobileNetV2, LeeNet11, LeeNet24, DaiNet19, \n",
    "    Res1dNet31, Res1dNet51, Wavegram_Cnn14, Wavegram_Logmel_Cnn14, \n",
    "    Wavegram_Logmel128_Cnn14, Cnn14_16k, Cnn14_8k, Cnn14_mel32, Cnn14_mel128, \n",
    "    Cnn14_mixup_time_domain, Cnn14_DecisionLevelMax, Cnn14_DecisionLevelAtt, Cnn14Bert)\n",
    "from pytorch_utils import (move_data_to_device, count_parameters, count_flops, \n",
    "    do_mixup)\n",
    "from data_generator import (AudioSetDataset, TrainSampler, BalancedTrainSampler, \n",
    "    AlternateTrainSampler, EvaluateSampler, collate_fn, AudioSetBiModalDataset, get_collate_fn)\n",
    "from evaluate import Evaluator, EvaluatorBiModal\n",
    "import config\n",
    "import librosa\n",
    "import pandas as pd\n",
    "from collections import defaultdict\n",
    "from tqdm import tqdm, trange\n",
    "import pickle\n",
    "import multiprocessing\n",
    "from sklearn.metrics.pairwise import cosine_similarity\n",
    "from rank_metrics import retrieval_as_classification"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of the model checkpoint at prajjwal1/bert-medium were not used when initializing BertModel: ['cls.predictions.transform.dense.bias', 'cls.seq_relationship.bias', 'cls.predictions.decoder.weight', 'cls.predictions.transform.LayerNorm.weight', 'cls.predictions.transform.dense.weight', 'cls.predictions.decoder.bias', 'cls.predictions.transform.LayerNorm.bias', 'cls.predictions.bias', 'cls.seq_relationship.weight']\n",
      "- This IS expected if you are initializing BertModel from the checkpoint of a model trained on another task or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n",
      "- This IS NOT expected if you are initializing BertModel from the checkpoint of a model that you expect to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model).\n"
     ]
    }
   ],
   "source": [
    "model = Cnn14Bert(sample_rate=32000, window_size=1024, \n",
    "        hop_size=160, mel_bins=64, fmin=50, fmax=8000, \n",
    "        bert_model_type=\"prajjwal1/bert-medium\", \n",
    "        max_seq_len=16, shared_dim=1024)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "resume_checkpoint_path = \"/backup/data/Audioset/zelin/workspaces/audioset_tagging/checkpoints_bimodal/main_bimodal/sample_rate=32000,window_size=1024,hop_size=160,mel_bins=64,fmin=50,fmax=8000/data_type=balanced_train/Cnn14Bert/bert_type=prajjwal1/bert-medium/balanced=balanced/max_text_nums=128/max_seq_len=16/batch_size=32/100000_iterations.pth\"\n",
    "checkpoint = torch.load(resume_checkpoint_path)\n",
    "model.load_state_dict(checkpoint['model'], strict=False)\n",
    "model.cuda()\n",
    "model.eval()\n",
    "None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>youtube_id</th>\n",
       "      <th>start_time</th>\n",
       "      <th>caption</th>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>audiocap_id</th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "      <th></th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>97151</th>\n",
       "      <td>vfY_TJq7n_U</td>\n",
       "      <td>130</td>\n",
       "      <td>Rustling occurs, ducks quack and water splashe...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>108945</th>\n",
       "      <td>tdWhHV3X25Q</td>\n",
       "      <td>60</td>\n",
       "      <td>An audience gives applause as a man yells and ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>107898</th>\n",
       "      <td>tw76HGONaKg</td>\n",
       "      <td>570</td>\n",
       "      <td>A man speaks over intermittent keyboard taps</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>107893</th>\n",
       "      <td>y2bVZ7rz-5M</td>\n",
       "      <td>280</td>\n",
       "      <td>Motor noise is followed by a horn honking and ...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>107892</th>\n",
       "      <td>ti66RjZWTp0</td>\n",
       "      <td>20</td>\n",
       "      <td>A male speaks as metal clicks and a gun fires ...</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "              youtube_id  start_time  \\\n",
       "audiocap_id                            \n",
       "97151        vfY_TJq7n_U         130   \n",
       "108945       tdWhHV3X25Q          60   \n",
       "107898       tw76HGONaKg         570   \n",
       "107893       y2bVZ7rz-5M         280   \n",
       "107892       ti66RjZWTp0          20   \n",
       "\n",
       "                                                       caption  \n",
       "audiocap_id                                                     \n",
       "97151        Rustling occurs, ducks quack and water splashe...  \n",
       "108945       An audience gives applause as a man yells and ...  \n",
       "107898            A man speaks over intermittent keyboard taps  \n",
       "107893       Motor noise is followed by a horn honking and ...  \n",
       "107892       A male speaks as metal clicks and a gun fires ...  "
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "val_df = pd.read_csv(\"/home/zhiling/py3_workspace/aser_audioset/audio_Caps/val.csv\", index_col=0)\n",
    "val_df.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['group sings', 'audience gives applause', 'man yells']"
      ]
     },
     "execution_count": 5,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Positive caption for clip tdWhHV3X25Q @ 60s, split into event phrases.\n",
     "cap = \"An audience gives applause as a man yells and a group sings\"\n",
     "pos_phrases = split_phrases(cap)\n",
     "pos_phrases"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['audience cheers', 'man talks', 'applaud']"
      ]
     },
     "execution_count": 6,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# A second, paraphrased caption for the same clip tdWhHV3X25Q @ 60s.\n",
     "cap = \"An audience cheers and applaud while a man talks\"\n",
     "pos_phrases2 = split_phrases(cap)\n",
     "pos_phrases2"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "['motor noise is', 'horn honking', 'siren wailing']"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Negative caption for clip y2bVZ7rz-5M @ 280s.\n",
     "# NOTE(review): 'follow by' (sic, dataset caption says 'followed by');\n",
     "# presumably that is why split_phrases emits the fragment 'motor noise is'.\n",
     "cap = \"Motor noise is follow by a horn honking and a siren wailing\"\n",
     "neg_phrases = split_phrases(cap)\n",
     "neg_phrases"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([9, 1024])"
      ]
     },
     "execution_count": 11,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "phrase_embs = model(texts=pos_phrases+pos_phrases2+neg_phrases)['text_emb']\n",
    "phrase_embs = torch.nn.functional.normalize(phrase_embs)\n",
    "phrase_embs.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0000, 0.9872, 0.9788, 0.9828, 0.9597, 0.4988, 0.9782, 0.1449, 0.1784],\n",
       "        [0.9872, 1.0000, 0.9909, 0.9934, 0.9632, 0.4988, 0.9773, 0.1241, 0.1510],\n",
       "        [0.9788, 0.9909, 1.0000, 0.9924, 0.9555, 0.4632, 0.9756, 0.1136, 0.1389],\n",
       "        [0.9828, 0.9934, 0.9924, 1.0000, 0.9473, 0.4662, 0.9770, 0.1184, 0.1484],\n",
       "        [0.9597, 0.9632, 0.9555, 0.9473, 1.0000, 0.5793, 0.9558, 0.1602, 0.1812],\n",
       "        [0.4988, 0.4988, 0.4632, 0.4662, 0.5793, 1.0000, 0.4686, 0.2507, 0.4255],\n",
       "        [0.9782, 0.9773, 0.9756, 0.9770, 0.9558, 0.4686, 1.0000, 0.1202, 0.1445],\n",
       "        [0.1449, 0.1241, 0.1136, 0.1184, 0.1602, 0.2507, 0.1202, 1.0000, 0.3241],\n",
       "        [0.1784, 0.1510, 0.1389, 0.1484, 0.1812, 0.4255, 0.1445, 0.3241, 1.0000]],\n",
       "       device='cuda:0', grad_fn=<MmBackward>)"
      ]
     },
     "execution_count": 12,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "phrase_embs @ phrase_embs.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "metadata": {},
   "outputs": [],
   "source": [
    "with open('results/phrase_audio_index_audiocaps.pkl', 'rb') as f:\n",
    "    cap_id2caption, cap_id2phrases, cap_id2audio_id, audio_id2filename, phrase2cap_ids, phrase_id2phrase, yid_time2fname, phrase_embeddings, audio_embeddings = pickle.load(f)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'tdWhHV3X25Q_60.000_70.000.wav'"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "yid_time2fname[('tdWhHV3X25Q', '60')]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'y2bVZ7rz-5M_280.000_290.000.wav'"
      ]
     },
     "execution_count": 16,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "yid_time2fname[('y2bVZ7rz-5M', '280')]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'x6iCUDmRpKQ_38.000_48.000.wav'"
      ]
     },
     "execution_count": 24,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "yid_time2fname[('x6iCUDmRpKQ', '38')]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_wav(full_path):\n",
    "    (waveform, _) = librosa.core.load(full_path, sr=config.sample_rate, mono=True)\n",
    "    waveform = pad_or_truncate(waveform, config.clip_samples)\n",
    "    return waveform"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [],
   "source": [
    "wav_dir = \"/backup/data/audiocaps/audio/val/\"\n",
    "waveform = load_wav(wav_dir+\"tdWhHV3X25Q_60.000_70.000.wav\")\n",
    "waveform = waveform[None, :]    # (1, audio_length)\n",
    "waveform = move_data_to_device(waveform, 'cuda')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "(torch.Size([1, 1024]), tensor(1., device='cuda:0', grad_fn=<NormBackward0>))"
      ]
     },
     "execution_count": 19,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "audio_emb = model(waveform=waveform)['audio_emb']\n",
    "audio_emb = torch.nn.functional.normalize(audio_emb)\n",
    "audio_emb.shape, audio_emb.norm()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2505, -0.2400, -0.2369, -0.2363, -0.2525, -0.2457, -0.2501, -0.1927,\n",
       "         -0.1970]], device='cuda:0', grad_fn=<MmBackward>)"
      ]
     },
     "execution_count": 20,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Audio-vs-phrase cosine similarities come out near-identical (~-0.25) --\n",
     "# the model cannot even separate phrases from two very different captions?\n",
     "audio_emb @ phrase_embs.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[1.0000, 0.4818, 0.4666, 0.1125, 0.3808, 0.3518],\n",
       "        [0.4818, 1.0000, 0.6172, 0.1432, 0.3657, 0.2890],\n",
       "        [0.4666, 0.6172, 1.0000, 0.2990, 0.3406, 0.3440],\n",
       "        [0.1125, 0.1432, 0.2990, 1.0000, 0.2024, 0.3336],\n",
       "        [0.3808, 0.3657, 0.3406, 0.2024, 1.0000, 0.4316],\n",
       "        [0.3518, 0.2890, 0.3440, 0.3336, 0.4316, 1.0000]], device='cuda:0',\n",
       "       grad_fn=<MmBackward>)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# The unimodal (text-text) similarities at least look reasonable.\n",
     "# NOTE(review): this 6x6 output reflects a stale phrase_embs from an\n",
     "# out-of-order run (execution counts repeat in this notebook); a fresh\n",
     "# top-to-bottom run reproduces the 9x9 matrix shown earlier.\n",
     "phrase_embs @ phrase_embs.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 21,
   "metadata": {},
   "outputs": [],
   "source": [
    "waveform2 = load_wav(wav_dir+\"y2bVZ7rz-5M_280.000_290.000.wav\")\n",
    "waveform2 = waveform2[None, :]    # (1, audio_length)\n",
    "waveform2 = move_data_to_device(waveform2, 'cuda')\n",
    "audio_emb2 = model(waveform=waveform2)['audio_emb']\n",
    "audio_emb2 = torch.nn.functional.normalize(audio_emb2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.4542]], device='cuda:0', grad_fn=<MmBackward>)"
      ]
     },
     "execution_count": 22,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Audio-audio similarity for a dissimilar pair of clips.\n",
     "audio_emb @ audio_emb2.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "metadata": {},
   "outputs": [],
   "source": [
    "waveform3 = load_wav(wav_dir+\"x6iCUDmRpKQ_38.000_48.000.wav\")\n",
    "waveform3 = waveform3[None, :]    # (1, audio_length)\n",
    "waveform3 = move_data_to_device(waveform3, 'cuda')\n",
    "audio_emb3 = model(waveform=waveform3)['audio_emb']\n",
    "audio_emb3 = torch.nn.functional.normalize(audio_emb3)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 26,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[0.6422]], device='cuda:0', grad_fn=<MmBackward>)"
      ]
     },
     "execution_count": 26,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Audio-audio similarity for a similar pair of clips.\n",
     "audio_emb @ audio_emb3.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 36,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2631, -0.2544, -0.2513, -0.2472, -0.2828, -0.2247, -0.2774, -0.2337,\n",
       "         -0.2724]], device='cuda:0', grad_fn=<MmBackward>)"
      ]
     },
     "execution_count": 36,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# Same flat audio-vs-phrase pattern for the third clip: the model still\n",
     "# cannot separate phrases from two very different captions.\n",
     "audio_emb3 @ phrase_embs.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['audience gives applause']\n",
      "['group sings', 'audience gives applause', 'man yells', 'audience cheers', 'man talks', 'applaud', 'motor noise is', 'horn honking', 'siren wailing']\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "tensor([[-0.2631, -0.2544, -0.2513, -0.2472, -0.2828, -0.2247, -0.2774, -0.2337,\n",
       "         -0.2724]], device='cuda:0', grad_fn=<MmBackward>)"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "cap = 'An audience gives applause'\n",
    "print(split_phrases(cap))\n",
    "print(pos_phrases+pos_phrases2+neg_phrases)\n",
    "audio_emb3 @ phrase_embs.T"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 33,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "torch.Size([4, 1024])"
      ]
     },
     "execution_count": 33,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "phrase_embs2 = model(texts=['yell', 'applause', 'siren', 'Vehicle horn, car horn, honking'])['text_emb']\n",
    "phrase_embs2 = torch.nn.functional.normalize(phrase_embs2)\n",
    "phrase_embs2.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 34,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['group sings', 'audience gives applause', 'man yells']\n",
      "['yell', 'applause', 'siren', 'Vehicle horn, car horn, honking']\n",
      "tensor([[-0.1276, -0.0231, -0.2160, -0.2114]], device='cuda:0',\n",
      "       grad_fn=<MmBackward>)\n",
      "tensor([[0.2402, 0.1165, 0.2535, 0.1751],\n",
      "        [0.2385, 0.1447, 0.2208, 0.1443],\n",
      "        [0.2479, 0.1195, 0.2151, 0.1291]], device='cuda:0',\n",
      "       grad_fn=<MmBackward>)\n"
     ]
    }
   ],
   "source": [
     "# Similarity against the raw AudioSet-style labels is higher than against\n",
     "# the prefixed phrase versions, and the distinctions look reasonable.\n",
     "# The model seems very sensitive to subject prefixes in the text:\n",
     "# 'audience gives applause' vs 'applause' similarity is quite low.\n",
     "print(pos_phrases)\n",
     "print(['yell', 'applause', 'siren', 'Vehicle horn, car horn, honking'])\n",
     "print(audio_emb @ phrase_embs2.T)\n",
     "print(phrase_embs[:3] @ phrase_embs2.T)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 35,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "['motor noise is', 'horn honking', 'siren wailing']\n",
      "['yell', 'applause', 'siren', 'Vehicle horn, car horn, honking']\n",
      "tensor([[-0.3009, -0.2207, -0.1412, -0.0607]], device='cuda:0',\n",
      "       grad_fn=<MmBackward>)\n",
      "tensor([[0.2268, 0.1203, 0.2360, 0.1642],\n",
      "        [0.2228, 0.1324, 0.2446, 0.8743],\n",
      "        [0.3726, 0.1946, 0.8005, 0.3829]], device='cuda:0',\n",
      "       grad_fn=<MmBackward>)\n"
     ]
    }
   ],
   "source": [
     "# For the negative phrases, the prefixed and unprefixed versions are still\n",
     "# fairly similar (e.g. 'siren wailing' vs 'siren' = 0.80).\n",
     "print(neg_phrases)\n",
     "print(['yell', 'applause', 'siren', 'Vehicle horn, car horn, honking'])\n",
     "print(audio_emb2 @ phrase_embs2.T)\n",
     "print(phrase_embs[-3:] @ phrase_embs2.T)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "interpreter": {
   "hash": "74d07dc07065639914d79fc95a011a49c7b91ce6bf0d82680fb06e81dd1e87b0"
  },
  "kernelspec": {
   "display_name": "Python 3.6.10 64-bit ('py36': conda)",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.10"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
