{
 "metadata": {
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9-final"
  },
  "orig_nbformat": 2,
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3",
   "language": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2,
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "sys.version_info(major=3, minor=6, micro=9, releaselevel='final', serial=0)\nmatplotlib 3.3.4\nnumpy 1.19.5\npandas 1.1.5\nsklearn 0.24.1\ntorch 1.7.1+cu110\n"
     ]
    }
   ],
   "source": [
    "import os\n",
    "import sys\n",
    "import time\n",
    "import copy\n",
    "import random\n",
    "import PIL.Image\n",
    "\n",
    "import matplotlib as mpl\n",
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "mpl.rcParams['figure.figsize'] = (12, 12)\n",
    "mpl.rcParams['axes.grid'] = False\n",
    "plt.ion()  # interactive mode\n",
    "\n",
    "import sklearn\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "import torch\n",
    "import torchvision\n",
    "from torchvision import datasets, models, transforms\n",
    "from torch.utils.data import Dataset, TensorDataset, DataLoader\n",
    "\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.optim import lr_scheduler\n",
    "\n",
    "from models.Net import LTE, WideResNet50\n",
    "from dataloader.dataloader import load_data\n",
    "\n",
    "print(sys.version_info)\n",
    "for module in mpl, np, pd, sklearn, torch:\n",
    "    print(module.__name__, module.__version__)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "cuda\n"
     ]
    }
   ],
   "source": [
     "# Prevent error: image file is truncated (# bytes not processed)\n",
     "from PIL import ImageFile\n",
     "ImageFile.LOAD_TRUNCATED_IMAGES = True\n",
     "\n",
     "# GPU configuration: use CUDA when available, otherwise fall back to CPU.\n",
     "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
     "cpu = torch.device(\"cpu\")  # NOTE(review): appears unused in this notebook -- confirm before removing\n",
     "print(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "input count:  100\ntarget count:  12184\n"
     ]
    }
   ],
   "source": [
     "# Load data\n",
     "data = load_data()\n",
     "input_count = data[\"input_count\"]    # number of query images\n",
     "target_count = data[\"target_count\"]  # number of target (gallery) images\n",
     "inp_ds = data[\"inp_ds\"]    # query dataset; iterating yields (tensor, name)\n",
     "tar_dl = data[\"tar_dl\"]    # target dataloader; yields (image batch, names)\n",
     "\n",
     "# Pick a model (built with requires_grad=False -- used for inference only here)\n",
     "model = LTE(requires_grad=False, use_pretrained=False)\n",
     "# model = WideResNet50(class_name=list(range(7)), use_pretrained=False)\n",
     "model = model.to(device)"
   ]
  },
  {
   "source": [
    "# Search"
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
     "# NOTE(review): dead code kept for reference -- the same unfold /\n",
     "# normalize / matmul relevance computation lives in query_tardl below.\n",
     "# def relevance(inpv, tarv, lte_model):  # support target batch\n",
     "#     inpv_unfold = F.unfold(inpv, kernel_size=(3, 3), padding=1)\n",
     "#     tarv_unfold = F.unfold(tarv, kernel_size=(3, 3), padding=1)\n",
     "#     tarv_unfold = tarv_unfold.permute(0, 2, 1)\n",
     "\n",
     "#     tarv_unfold = F.normalize(tarv_unfold, dim=2)  # [N, Hr*Wr, C*k*k] [1, 1024, 2304]\n",
     "#     inpv_unfold = F.normalize(inpv_unfold, dim=1)  # [N, C*k*k, H*W]   [1, 2304, 256]\n",
     "#     return torch.matmul(tarv_unfold, inpv_unfold)  # [N, Hr*Wr, H*W]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "elapsed:  5.927788972854614\nA1982.png\n"
     ]
    },
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "            key                              val\n",
       "350   A2256.jpg  tensor(7.3034, device='cuda:0')\n",
       "269   A1983.jpg  tensor(7.3244, device='cuda:0')\n",
       "185  12 (5).png  tensor(7.3524, device='cuda:0')\n",
       "38    A2234.jpg  tensor(7.3551, device='cuda:0')\n",
       "419   A1929.jpg  tensor(7.4023, device='cuda:0')"
      ],
      "text/html": "<div>\n<style scoped>\n    .dataframe tbody tr th:only-of-type {\n        vertical-align: middle;\n    }\n\n    .dataframe tbody tr th {\n        vertical-align: top;\n    }\n\n    .dataframe thead th {\n        text-align: right;\n    }\n</style>\n<table border=\"1\" class=\"dataframe\">\n  <thead>\n    <tr style=\"text-align: right;\">\n      <th></th>\n      <th>key</th>\n      <th>val</th>\n    </tr>\n  </thead>\n  <tbody>\n    <tr>\n      <th>350</th>\n      <td>A2256.jpg</td>\n      <td>tensor(7.3034, device='cuda:0')</td>\n    </tr>\n    <tr>\n      <th>269</th>\n      <td>A1983.jpg</td>\n      <td>tensor(7.3244, device='cuda:0')</td>\n    </tr>\n    <tr>\n      <th>185</th>\n      <td>12 (5).png</td>\n      <td>tensor(7.3524, device='cuda:0')</td>\n    </tr>\n    <tr>\n      <th>38</th>\n      <td>A2234.jpg</td>\n      <td>tensor(7.3551, device='cuda:0')</td>\n    </tr>\n    <tr>\n      <th>419</th>\n      <td>A1929.jpg</td>\n      <td>tensor(7.4023, device='cuda:0')</td>\n    </tr>\n  </tbody>\n</table>\n</div>"
     },
     "metadata": {},
     "execution_count": 9
    }
   ],
   "source": [
     "# Single search\n",
     "inp, inp_name = inp_ds[1]\n",
     "inp = torch.unsqueeze(inp.to(device), dim=0)   # add batch dim\n",
     "inp = model(inp)                        # [1, 256, 16, 16]\n",
     "# NOTE(review): this unfold uses the default stride=1, while the batch\n",
     "# evaluation cell below unfolds the query with stride=3 -- confirm which\n",
     "# is intended.\n",
     "inpv = F.unfold(inp, kernel_size=(3, 3), padding=1)\n",
     "\n",
     "# Getting the features through the model takes the most time\n",
     "# Then are the unfold time and the matmul time\n",
     "# Calculating the max and update the data structure take the least\n",
    "\n",
    "def query_tardl(inpv, tar_dl):\n",
    "    pairing = {}\n",
    "    for tar, tar_name in tar_dl:\n",
    "        tar = model(tar.to(device))      # [b, 256, 32, 32]\n",
    "\n",
    "        tarv = F.unfold(tar, kernel_size=(3, 3), padding=1, stride=3)\n",
    "        tarv = tarv.permute(0, 2, 1)\n",
    "\n",
    "        tarv = F.normalize(tarv, dim=2)  # [N, Hr*Wr, C*k*k] [1, 1024, 2304]\n",
    "        inpv = F.normalize(inpv, dim=1)  # [N, C*k*k, H*W]   [1, 2304, 256]\n",
    "        rel = torch.matmul(tarv, inpv)   # [b, 1024, 256]\n",
    "\n",
    "        maxrel, _ = torch.max(rel, dim=1)   # [b, 256]\n",
    "        maxrel = torch.linalg.norm(maxrel, dim=1)\n",
    "        pairing.update(dict(zip(tar_name, maxrel)))\n",
    "    return pairing\n",
    "\n",
    "start = time.time()\n",
    "pairing = query_tardl(inpv, tar_dl)\n",
    "print(\"elapsed: \", time.time() - start)\n",
    "\n",
    "keys, vals = zip(*pairing.items())\n",
    "pairing = pd.DataFrame.from_dict({\"key\": keys, \"val\": vals})\n",
    "print(inp_name)\n",
    "\n",
    "k = 5\n",
    "topk = np.argpartition(pairing.val, -k)[-k:]\n",
    "pairing.loc[topk]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "elapsed:  214.85364031791687\nhits:  81\ntop-10 acc: 0.8100\n"
     ]
    }
   ],
   "source": [
    "k = 10\n",
    "hits = 0\n",
    "since = time.time()\n",
    "for query, query_name in inp_ds:\n",
    "    query = torch.unsqueeze(query, dim=0)\n",
    "    query = model(query.to(device))       # [1, 256, 16, 16]\n",
    "    query = F.unfold(query, kernel_size=(3, 3), padding=1, stride=3)\n",
    "    pairing = query_tardl(query, tar_dl)\n",
    "\n",
    "    keys, vals = zip(*pairing.items())\n",
    "    pairing = pd.DataFrame.from_dict({\"key\": keys, \"val\": vals})\n",
    "    topk = np.argpartition(pairing.val, -k)[-k:]\n",
    "\n",
    "    if query_name.split('.')[0] in \\\n",
    "        [name.split('.')[0] for name in pairing.loc[topk][\"key\"]]:\n",
    "        hits += 1\n",
    "\n",
    "print(\"elapsed: \", time.time() - since)\n",
    "print(\"hits: \", hits)\n",
    "print(\"top-{} acc: {:.4f}\".format(k, hits/input_count))"
   ]
  },
  {
   "source": [
    "# Vectorized search "
   ],
   "cell_type": "markdown",
   "metadata": {}
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "elapsed:  139.52707147598267\n"
     ]
    }
   ],
   "source": [
     "# Load target features precomputed by the cell below.\n",
     "# NOTE(review): torch.load unpickles arbitrary objects -- only load files\n",
     "# produced by this project.\n",
     "since = time.time()\n",
     "saved = torch.load(\"../saved/lte_features.pth\")\n",
     "tar_names = saved[\"tar_names\"]\n",
     "targets = saved[\"targets\"]      # already unfolded + normalized\n",
     "print(\"elapsed: \", time.time() - since)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "torch.Size([12184, 256, 32, 32])\nelapsed:  53.40705060958862\n"
     ]
    }
   ],
   "source": [
     "# Precompute the features and concat\n",
     "since = time.time()\n",
     "tar_names = [name for _, names in tar_dl for name in names]  # [n]\n",
     "# Move each batch back to CPU immediately so the full feature tensor does\n",
     "# not have to fit in GPU memory.\n",
     "targets = torch.cat([model(tar.to(device)).cpu()\n",
     "                     for tar, _ in tar_dl], dim=0)  # [n, 256, 32, 32]\n",
     "print(targets.shape)\n",
     "print(\"elapsed: \", time.time() - since)\n",
     "\n",
     "# Unfold into 3x3 patches and L2-normalize once, so each query only needs\n",
     "# a matmul against these features at search time.\n",
     "targets = F.unfold(targets, kernel_size=(3, 3), padding=1, stride=3)\n",
     "targets = targets.permute(0, 2, 1)\n",
     "targets = F.normalize(targets, dim=2)  # [N, Hr*Wr, C*k*k] [n, 1024, 2304]\n",
     "print(targets.shape)\n",
     "\n",
     "# Save the precomputed result\n",
     "# PATH = \"../saved/lte_features.pth\"\n",
     "# torch.save({\n",
     "#     \"tar_names\": tar_names,\n",
     "#     \"targets\": targets\n",
     "# }, PATH)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single image query\n",
    "def single_query(query, targets, tar_names):\n",
    "    with torch.no_grad():\n",
    "        query = F.unfold(query, kernel_size=(3, 3), padding=1, stride=2)\n",
    "        # targets = F.unfold(targets, kernel_size=(3, 3), padding=1, stride=3)\n",
    "        # targets = targets.permute(0, 2, 1)\n",
    "        # normalize\n",
    "        # targets = F.normalize(targets, dim=2)  # [N, Hr*Wr, C*k*k] [n, 1024, 2304]\n",
    "        query = F.normalize(query, dim=1)      # [N, C*k*k, H*W]   [n, 2304, 256]\n",
    "        # relevance\n",
    "        rel = torch.matmul(targets, query)     # [N, Hr*Wr, H*W]   [n, 1024, 256]\n",
    "        maxrel, _ = torch.max(rel, dim=1)      # [n, 256]\n",
    "        maxrel = torch.norm(maxrel, dim=1)     # [n]\n",
    "        return pd.DataFrame({\"key\": tar_names, \"val\": maxrel.cpu().numpy()})\n",
    "\n",
    "\n",
    "# since = time.time()\n",
    "# query, query_name = inp_ds[1]\n",
    "# query = torch.unsqueeze(query, dim=0)\n",
    "# query = model(query.to(device))                # [1, 256, 16, 16]\n",
    "# pairing = single_query(query, targets, tar_names)\n",
    "\n",
    "# print(\"elapsed: \", time.time() - since)\n",
    "# print(\"query: \", query_name)\n",
    "\n",
    "# k = 5\n",
    "# topk = np.argpartition(pairing.val, -k)[-k:]\n",
    "# pairing.loc[topk]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [
    {
     "output_type": "error",
     "ename": "RuntimeError",
     "evalue": "CUDA out of memory. Tried to allocate 6.69 GiB (GPU 0; 7.80 GiB total capacity; 2.93 MiB already allocated; 6.11 GiB free; 22.00 MiB reserved in total by PyTorch)",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mRuntimeError\u001b[0m                              Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-13-28a09b2f28ef>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m\u001b[0m\n\u001b[1;32m     10\u001b[0m     \u001b[0mquery\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mmodel\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mquery\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mto\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdevice\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m)\u001b[0m       \u001b[0;31m# [1, 256, 16, 16]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     11\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 12\u001b[0;31m     \u001b[0mpairing\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0msingle_query\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mquery\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtargets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mtar_names\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     13\u001b[0m     \u001b[0mtopk\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mnp\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0margpartition\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mpairing\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mval\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m-\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m-\u001b[0m\u001b[0mk\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     14\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m<ipython-input-12-2d5189750bb5>\u001b[0m in \u001b[0;36msingle_query\u001b[0;34m(query, targets, tar_names)\u001b[0m\n\u001b[1;32m      9\u001b[0m         \u001b[0mquery\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormalize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mquery\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m      \u001b[0;31m# [N, C*k*k, H*W]   [n, 2304, 256]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     10\u001b[0m         \u001b[0;31m# relevance\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 11\u001b[0;31m         \u001b[0mrel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmatmul\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtargets\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mquery\u001b[0m\u001b[0;34m)\u001b[0m     \u001b[0;31m# [N, Hr*Wr, H*W]   [n, 1024, 256]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m     12\u001b[0m         \u001b[0mmaxrel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0m_\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmax\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mrel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m      \u001b[0;31m# [n, 256]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m     13\u001b[0m         \u001b[0mmaxrel\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mtorch\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnorm\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmaxrel\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m1\u001b[0m\u001b[0;34m)\u001b[0m     \u001b[0;31m# [n]\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mRuntimeError\u001b[0m: CUDA out of memory. Tried to allocate 6.69 GiB (GPU 0; 7.80 GiB total capacity; 2.93 MiB already allocated; 6.11 GiB free; 22.00 MiB reserved in total by PyTorch)"
     ]
    }
   ],
   "source": [
     "\"\"\"\n",
     "Assume: target data are loaded already.\n",
     "Computes relevance against ALL targets at once. Careful with CUDA memory:\n",
     "the full relevance tensor needs several GiB in one allocation (this cell\n",
     "raised CUDA out-of-memory on an ~8 GiB GPU -- see the batched cell below).\n",
     "\"\"\"\n",
     "k = 20\n",
     "hits = 0\n",
     "\n",
     "since = time.time()\n",
     "for query, query_name in inp_ds:\n",
     "    query = torch.unsqueeze(query, dim=0)\n",
     "    query = model(query.to(device))       # [1, 256, 16, 16]\n",
     "\n",
     "    pairing = single_query(query, targets, tar_names)\n",
     "    topk = np.argpartition(pairing.val, -k)[-k:]\n",
     "\n",
     "    # Hit when the query's basename matches any top-k target basename.\n",
     "    if query_name.split('.')[0] in \\\n",
     "        [name.split('.')[0] for name in pairing.loc[topk][\"key\"]]:\n",
     "        hits += 1\n",
     "\n",
     "print(\"elapsed: \", time.time() - since)\n",
     "print(\"hits: \", hits)\n",
     "print(\"top-{} acc: {:.4f}\".format(k, hits/input_count))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "torch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([512, 121, 2304])\ntorch.Size([408, 121, 2304])\n"
     ]
    }
   ],
   "source": [
    "batch_size = 512\n",
    "\n",
    "targets.shape[0] // 512\n",
    "remainder = targets.shape[0] % 512\n",
    "\n",
    "for ind in range(0, targets.shape[0], batch_size):\n",
    "    print(targets[ind:ind+max(batch_size, remainder)].shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "output_type": "execute_result",
     "data": {
      "text/plain": [
       "torch.Size([12184, 121, 2304])"
      ]
     },
     "metadata": {},
     "execution_count": 8
    }
   ],
   "source": [
    "targets.shape"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {
    "tags": []
   },
   "outputs": [
    {
     "output_type": "stream",
     "name": "stdout",
     "text": [
      "elapsed:  3.453705310821533\nhits:  0\ntop-3 acc: 0.0000\n"
     ]
    }
   ],
   "source": [
    "\"\"\"\n",
    "Compute with targets batch-by-batch\n",
    "\"\"\"\n",
    "k = 3\n",
    "hits = 0\n",
    "batch_size = 1024\n",
    "\n",
    "since = time.time()\n",
    "for query, query_name in inp_ds:\n",
    "    query = torch.unsqueeze(query, dim=0)\n",
    "    query = model(query.to(device))                 # [1, 256, 16, 16]\n",
    "\n",
    "    pairing = pd.DataFrame({\"key\": [], \"val\": []})\n",
    "    for ind in range(0, targets.shape[0], batch_size):\n",
    "        b_targets = targets[ind:ind+max(batch_size, remainder)]\n",
    "        b_targets = b_targets.to(device)\n",
    "        b_tarnames = tar_names[ind:ind+max(batch_size, remainder)]\n",
    "        pairing = pd.concat(\n",
    "            [pairing, single_query(query, b_targets, b_tarnames)])\n",
    "\n",
    "    pairing.index = range(0, pairing.index.size)  # rearrange the indices\n",
    "    topk = np.argpartition(pairing.val, -k)[-k:]\n",
    "    if query_name.split('.')[0] in \\\n",
    "        [name.split('.')[0] for name in pairing.loc[topk][\"key\"]]:\n",
    "        hits += 1\n",
    "    break\n",
    "\n",
    "print(\"elapsed: \", time.time() - since)\n",
    "print(\"hits: \", hits)\n",
    "print(\"top-{} acc: {:.4f}\".format(k, hits/input_count))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ]
}