{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "8a548821",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2021-10-01T18:45:31.137073Z",
     "iopub.status.busy": "2021-10-01T18:45:31.051286Z",
     "iopub.status.idle": "2021-10-01T18:45:32.610820Z",
     "shell.execute_reply": "2021-10-01T18:45:32.609794Z",
     "shell.execute_reply.started": "2021-10-01T18:37:09.132366Z"
    },
    "papermill": {
     "duration": 1.573016,
     "end_time": "2021-10-01T18:45:32.611006",
     "exception": false,
     "start_time": "2021-10-01T18:45:31.037990",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "\"\"\"Landmark retrieval offline code for ILR2021.\"\"\"\n",
    "\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "from tqdm import tqdm\n",
    "\n",
    "import pickle\n",
    "import os\n",
    "import time\n",
    "import sys\n",
    "sys.path.append('../input/util-code/local_matching')\n",
    "from local_matching import load_superpointglue_model, generate_superpoint_superglue, get_num_inliers, get_total_score\n",
    "from superpointglue_util import get_whole_cached_num_inliers, save_whole_cached_num_inliers\n",
    "\n",
    "\n",
    "# pylint: disable=not-callable, invalid-name, line-too-long\n",
    "# ########################## Cell 2 Configs ##################################\n",
    "\n",
    "MODE = 'retrieval'\n",
    "# Global configuration dictionary; read by every stage of the pipeline below.\n",
    "setting = {\n",
    "        # ############# General params ############\n",
    "        'IMAGE_DIR': '../input/landmark-retrieval-2021/',\n",
    "        'RAW_IMAGE_DIR': '/home/gongyou.zyq/datasets/google_landmark/',\n",
    "        'OUTPUT_TEMP_DIR': f'../temp/{MODE}/',    # Will be lost after reset\n",
    "        'OUTPUT_DIR': f'../working/{MODE}/',    # Will be saved after reset\n",
    "        'MODEL_DIR': '../input/models/',\n",
    "        'META_DIR': '../input/meta-data-final/',\n",
    "        'PROBE_DIR': '../input/landmark-retrieval-2021/test/',\n",
    "        'INDEX_DIR': '../input/landmark-retrieval-2021/index/',\n",
    "        'FEAT_DIR': f'../temp/{MODE}/features/',\n",
    "        'SIMS_DIR': f'../temp/{MODE}/sims/',\n",
    "        'SAMPLE_TEST_NUM': 1129,\n",
    "        # ############# ReID params ############\n",
    "        'REID_EXTRACT_FLAG': True,\n",
    "        'FP16': True,\n",
    "        'DEBUG_FLAG': False,\n",
    "        'MULTI_SCALE_FEAT': False,\n",
    "        # ############# ReID model list ############\n",
    "        # 'MODEL_LIST': ['R50', 'R101ibn', 'RXt101ibn', 'SER101ibn', 'ResNeSt101', 'ResNeSt269', 'EffNetB7'],\n",
    "        # 'MODEL_LIST': ['SER101ibn'],\n",
    "        'MODEL_LIST': ['SER101ibn', 'RXt101ibn', 'ResNeSt101', 'ResNeSt269'],\n",
    "        'MODEL_WEIGHT': [1.0, 1.0, 1.0, 1.0],    # Per-model ensemble weights, aligned with MODEL_LIST\n",
    "        # 'MODEL_WEIGHT': [1.0,],\n",
    "        'IMAGE_SIZE': 512,    # 256, 384, 448, 512\n",
    "        'BATCH_SIZE': 32,\n",
    "        'MODEL_PARAMS': {'R50': {'MODEL_NAME': 'R50_256.pth', 'BACKBONE': 'resnet50'},\n",
    "                         'R101ibn': {'MODEL_NAME': 'R101ibn_384_finetune_c2x.pth', 'BACKBONE': 'resnet101_ibn_a'},\n",
    "                         'RXt101ibn': {'MODEL_NAME': 'RXt101ibn_512_all.pth', 'BACKBONE': 'resnext101_ibn_a'},\n",
    "                         'SER101ibn': {'MODEL_NAME': 'SER101ibn_512_all.pth', 'BACKBONE': 'se_resnet101_ibn_a'},\n",
    "                         'ResNeSt101': {'MODEL_NAME': 'ResNeSt101_512_all.pth', 'BACKBONE': 'resnest101'},\n",
    "                         'ResNeSt269': {'MODEL_NAME': 'ResNeSt269_512_all.pth', 'BACKBONE': 'resnest269'},\n",
    "                         'EffNetB7': {'MODEL_NAME': 'efficientnet-b7_20_512_3796.pth', 'BACKBONE': 'efficientnet-b7'},\n",
    "                         },\n",
    "        # ############# Rerank params ############\n",
    "        'KR_FLAG': False,\n",
    "        'K1': 10,    # k-reciprocal neighbourhood size (see slice_jaccard)\n",
    "        'K2': 3,    # local query-expansion size (see slice_jaccard)\n",
    "        'INITIAL_RANK_FILE': f'../temp/{MODE}/initial_rank.npy',\n",
    "        'NAME_LIST_FILE': f'../temp/{MODE}/name_list.pkl',\n",
    "        'EUC_DIST_DIR': f'../temp/{MODE}/euc_dist/',\n",
    "        'GRAPH_DIST_DIR': f'../temp/{MODE}/graph_dist/',\n",
    "        'QE_DIST_DIR': f'../temp/{MODE}/qe_dist/',\n",
    "        'JACCARD_DIR': f'../temp/{MODE}/jaccard/',\n",
    "        'LAMBDA': 0.3,\n",
    "        # ############# Category Rerank ############\n",
    "        'CATEGORY_RERANK': 'before_merge',    # after_merge, before_merge or off\n",
    "        'VOTE_NUM': 3,    # Soft voting does not seem to work\n",
    "        'REF_SET_EXTRACT': False,    # Just need to cache once\n",
    "        'REF_ALL_LIST': '../input/meta-data-all/cache_all_list.pkl',\n",
    "        'REF_SET_LIST': '../input/meta-data-final/cache_index_train_list.pkl',    # full, index_train, all\n",
    "        'REF_SET_META': f'../temp/{MODE}/ref_meta.pkl',\n",
    "        'REF_SET_FEAT': '../input/meta-data-final/ref_feats.pkl',\n",
    "        'REF_LOC_MAP': '../input/meta-data-final/gbid2country.pkl',\n",
    "        'CATEGORY_THR': -1.0,\n",
    "        'alpha': 1.0,    # tag-vote boost weight (see rerank_tag_and_loc)\n",
    "        'beta': 0.1,    # location-vote boost weight (see rerank_tag_and_loc)\n",
    "        # ############ LocalMatching Rerank ############\n",
    "        'LOCAL_MATCHING': 'off',    # 'spg' or 'off'\n",
    "        'SPG_MODEL_DIR': '../input/models/local_matching',\n",
    "        'SPG_CACHE_DIR': f'../temp/{MODE}/local_matching_cache',\n",
    "        'SPG_RERANK_NUM': 10,    # rerank length, larger is better\n",
    "        'LOCAL_WEIGHT': 0.15,\n",
    "        'MAX_INLIERS': 90,\n",
    "        'SPG_DO_CACHE': True,    # Whether to save the inliers cache or not.\n",
    "        }\n",
    "\n",
    "# ########################## Cell 8 Get output file  #########################\n",
    "\n",
    "def slice_jaccard(probe_feat, topk_index_feats):\n",
    "    \"\"\"Kr rerank (k-reciprocal jaccard distance) for only top-k index feats.\n",
    "\n",
    "    Args:\n",
    "        probe_feat: (1, D) feature of a single probe (gpu tensor).\n",
    "        topk_index_feats: (K, D) features of the probe's top-k candidates.\n",
    "            # assumes features are L2-normalised so matmul is cosine -- TODO confirm\n",
    "    Returns:\n",
    "        (K,) fp16 cuda tensor with the jaccard distance to each candidate.\n",
    "    \"\"\"\n",
    "\n",
    "    query_num = 1\n",
    "    gallery_num = len(topk_index_feats)\n",
    "    all_num = query_num + gallery_num\n",
    "    concat_feat = torch.cat([probe_feat, topk_index_feats])\n",
    "    cos_sim = torch.matmul(concat_feat, concat_feat.T)    # (101, 101)\n",
    "    # Map cosine similarity in [-1, 1] to a distance in [0, 1].\n",
    "    original_dist = 1.0 - (cos_sim + 1.0)/2\n",
    "    initial_rank = torch.argsort(original_dist, dim=1)\n",
    "    initial_rank = initial_rank.cpu().numpy()\n",
    "    original_dist = original_dist.cpu().numpy()\n",
    "    # print(f'Memory usage: {psutil.virtual_memory().percent}')\n",
    "    # V[i, :] is a gaussian-weighted encoding of sample i's k-reciprocal set.\n",
    "    V = np.zeros((all_num, all_num))\n",
    "    # NOTE(review): gallery_num is re-bound to all_num here (original_dist is\n",
    "    # square); the standard k-reciprocal re-ranking code does the same, so the\n",
    "    # invIndex/jaccard loops below run over all samples.\n",
    "    gallery_num = original_dist.shape[0]\n",
    "\n",
    "    k1 = setting['K1']\n",
    "    k2 = setting['K2']\n",
    "    for i in range(all_num):\n",
    "        # k-reciprocal neighbors\n",
    "        forward_k_neigh_index = initial_rank[i,:k1+1]\n",
    "        backward_k_neigh_index = initial_rank[forward_k_neigh_index,:k1+1]\n",
    "        fi = np.where(backward_k_neigh_index==i)[0]\n",
    "        k_reciprocal_index = forward_k_neigh_index[fi]\n",
    "        k_reciprocal_expansion_index = k_reciprocal_index\n",
    "        # Expand the reciprocal set with candidates whose own (k1/2)-reciprocal\n",
    "        # neighbourhood overlaps this one by more than 2/3.\n",
    "        for j in range(len(k_reciprocal_index)):\n",
    "            candidate = k_reciprocal_index[j]\n",
    "            candidate_forward_k_neigh_index = initial_rank[candidate,:int(np.around(k1/2.))+1]\n",
    "            candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index,:int(np.around(k1/2.))+1]\n",
    "            fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\n",
    "            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\n",
    "            if len(np.intersect1d(candidate_k_reciprocal_index,k_reciprocal_index))> 2./3*len(candidate_k_reciprocal_index):\n",
    "                k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index,candidate_k_reciprocal_index)\n",
    "\n",
    "        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n",
    "        weight = np.exp(-original_dist[i,k_reciprocal_expansion_index])\n",
    "        V[i,k_reciprocal_expansion_index] = weight/np.sum(weight)\n",
    "    original_dist = original_dist[:query_num,]\n",
    "    # Local query expansion: average V over each sample's k2 nearest neighbours.\n",
    "    if k2 != 1:\n",
    "        V_qe = np.zeros_like(V,dtype=np.float32)\n",
    "        for i in range(all_num):\n",
    "            V_qe[i,:] = np.mean(V[initial_rank[i,:k2],:],axis=0)\n",
    "        V = V_qe\n",
    "        del V_qe\n",
    "    del initial_rank\n",
    "    # invIndex[i]: rows with non-zero membership for column i (inverted index).\n",
    "    invIndex = []\n",
    "    for i in range(gallery_num):\n",
    "        invIndex.append(np.where(V[:,i] != 0)[0])\n",
    "\n",
    "    jaccard_dist = np.zeros_like(original_dist,dtype = np.float32)\n",
    "\n",
    "    # Jaccard distance between each probe row and every sample.\n",
    "    for i in range(query_num):\n",
    "        temp_min = np.zeros(shape=[1,gallery_num],dtype=np.float32)\n",
    "        indNonZero = np.where(V[i,:] != 0)[0]\n",
    "        indImages = []\n",
    "        indImages = [invIndex[ind] for ind in indNonZero]\n",
    "        for j in range(len(indNonZero)):\n",
    "            temp_min[0,indImages[j]] = temp_min[0,indImages[j]]+ np.minimum(V[i,indNonZero[j]],V[indImages[j],indNonZero[j]])\n",
    "        jaccard_dist[i] = 1-temp_min/(2-temp_min)\n",
    "\n",
    "    # final_dist = jaccard_dist*(1-lambda_value) + original_dist*lambda_value\n",
    "    del original_dist\n",
    "    del V\n",
    "    # Keep only the probe-vs-gallery slice (drop probe-vs-probe column).\n",
    "    slice_jaccard = jaccard_dist[:query_num,query_num:].flatten()\n",
    "    return torch.tensor(slice_jaccard).cuda().half()\n",
    "\n",
    "\n",
    "def kr_rerank_fast(probe_feats, index_feats):\n",
    "    \"\"\"Memory efficient rerank.\n",
    "\n",
    "    probe_feats and index_feats are in gpu tensor.\n",
    "    For each probe, runs slice_jaccard on a small candidate pool (the probe's\n",
    "    top-k plus the neighbours of those neighbours) and blends the jaccard\n",
    "    distance with the original cosine distance.\n",
    "    Returns a (num_probes, num_index) fp16 cuda similarity matrix (1 - dist).\n",
    "    \"\"\"\n",
    "\n",
    "    # print('Starting KR re_ranking')\n",
    "    # Candidate pool size and the jaccard/original-distance blend factor.\n",
    "    topk = 30\n",
    "    fast_lambda = 0.5\n",
    "    cos_sims = torch.matmul(probe_feats, index_feats.T)\n",
    "    original_dists = 1.0 - (cos_sims + 1.0)/2\n",
    "    query_num = len(probe_feats)\n",
    "    gallery_num = len(index_feats)\n",
    "    final_dists = torch.zeros((query_num, gallery_num)).cuda().half()\n",
    "    # for i, probe_feat in enumerate(tqdm(probe_feats)):\n",
    "    for i, probe_feat in enumerate(probe_feats):\n",
    "        q_sim = cos_sims[i]\n",
    "        _, top_indices = torch.topk(q_sim, topk)\n",
    "        # Expand the pool with the top-k neighbours of each candidate.\n",
    "        q_neighbour_feats = index_feats[top_indices]\n",
    "        q_neighbour_sims = torch.matmul(q_neighbour_feats, index_feats.T)    # (topk, 400w)\n",
    "        _, neighbour_top_indices = torch.topk(q_neighbour_sims, topk)    # (topk, topk)\n",
    "        neighbour_top_indices = neighbour_top_indices.flatten()\n",
    "        top_indices = torch.cat([top_indices, neighbour_top_indices])\n",
    "        top_indices = torch.unique(top_indices)\n",
    "        # print(q_neighbour_sims.shape, neighbour_top_indices.shape, top_indices.shape)\n",
    "        topk_index_feats = index_feats[top_indices]\n",
    "        jaccard_dist = slice_jaccard(probe_feat[None, :], topk_index_feats)\n",
    "        # Non-candidates keep the maximum distance 1.0; only the pool is rescored.\n",
    "        expand_jaccard = torch.ones((gallery_num,)).cuda().half()\n",
    "        expand_jaccard[top_indices] = jaccard_dist\n",
    "        final_dists[i] = original_dists[i] * fast_lambda\n",
    "        final_dists[i] += expand_jaccard * (1-fast_lambda)\n",
    "    return 1.0 - final_dists\n",
    "\n",
    "\n",
    "def merge_tags(tags_list, scores, weight):\n",
    "    \"\"\"Merge per-model tag predictions by score-weighted voting.\n",
    "\n",
    "    Args:\n",
    "        tags_list: one tag sequence per model, shape (num_models, num_images).\n",
    "        scores: per-tag confidence scores aligned with tags_list, or None.\n",
    "        weight: optional per-model weights applied to the scores, or None.\n",
    "    Returns:\n",
    "        A list with one merged tag per image; -1 marks images on which every\n",
    "        model predicted a different tag.\n",
    "    \"\"\"\n",
    "    if len(tags_list)==1 or scores is None:\n",
    "        return tags_list[-1]\n",
    "\n",
    "    tags = torch.tensor(tags_list)\n",
    "    scores = torch.tensor(scores)\n",
    "    if weight is not None:\n",
    "        weight = torch.tensor(weight)\n",
    "    print(tags.shape, scores.shape)\n",
    "    #print(tags_list.shape)\n",
    "    #tags = torch.stack(tags_list, dim=0)\n",
    "    merged_tags = []\n",
    "    count = 0\n",
    "    for i in range(tags.shape[1]):\n",
    "        preds = torch.unique(tags[:, i])\n",
    "        if weight is not None:\n",
    "            score = scores[:, i] * weight\n",
    "        else:\n",
    "            score = scores[:, i]\n",
    "        if len(preds) == 1:\n",
    "            # All models agree: keep the common prediction.\n",
    "            merged_tags.append(preds[0])\n",
    "        elif len(preds) == tags.shape[0]:\n",
    "            # Every model disagrees: no consensus, mark as unknown.\n",
    "            merged_tags.append(-1)\n",
    "        else:\n",
    "            # Partial agreement: pick the tag with the largest summed score.\n",
    "            unique_score_list = []\n",
    "            for item in preds:\n",
    "                sum_score = torch.sum(score[tags[:, i] == item])\n",
    "                unique_score_list.append(sum_score)\n",
    "            unique_score_list = torch.tensor(unique_score_list)\n",
    "            best_index = torch.argmax(unique_score_list)\n",
    "            merged_tags.append(preds[best_index])\n",
    "            #print(f\"{tags[:, i]}->{preds[best_index]}\")\n",
    "            #print(f\"{score}\")\n",
    "            count += 1\n",
    "    print(f'{count} low constancy tags')\n",
    "    return merged_tags\n",
    "\n",
    "def get_probe_tags_index(probe_feats, index_feats, index_tags, mode='avg'):\n",
    "    \"\"\"Predict a tag for each probe by nearest-neighbour search over index feats.\n",
    "\n",
    "    Args:\n",
    "        probe_feats: (P, D) probe features (torch tensor).\n",
    "        index_feats: (N, D) index features (torch tensor).\n",
    "        index_tags: (N,) tag per index image (torch tensor).\n",
    "        mode: 'avg' compares each probe against per-tag mean features;\n",
    "              'single' compares against every index feature directly.\n",
    "    Returns:\n",
    "        (probe_tags, probe_scores): predicted tag and its top-1 similarity\n",
    "        score, one entry per probe.\n",
    "    \"\"\"\n",
    "\n",
    "    probe_tags = []\n",
    "    probe_scores = []\n",
    "    if mode == 'avg':\n",
    "        # Build one L2-normalised mean feature per unique index tag.\n",
    "        tag_mean_feats = []\n",
    "        index_tags_unique = torch.unique(index_tags)\n",
    "        for index_tag in index_tags_unique:\n",
    "            same_tags = torch.where(index_tags == index_tag)\n",
    "            same_tag_feats = torch.mean(index_feats[same_tags], dim=0, keepdim=True)\n",
    "            same_tag_feats = same_tag_feats / torch.norm(same_tag_feats, 2, 1)\n",
    "            tag_mean_feats.append(same_tag_feats)\n",
    "        tag_mean_feats = torch.cat(tag_mean_feats, dim=0)\n",
    "\n",
    "        for probe_index, query_feat in enumerate(tqdm(probe_feats)):\n",
    "            sim = torch.matmul(tag_mean_feats, query_feat[:, None]).flatten()\n",
    "            _, indices = torch.topk(sim, 1)\n",
    "            probe_tag = index_tags_unique[indices[0]]\n",
    "            probe_tags.append(probe_tag)\n",
    "            probe_scores.append(sim[indices[0]])\n",
    "    elif mode == 'single':\n",
    "        for probe_index, query_feat in enumerate(tqdm(probe_feats)):\n",
    "            sim = torch.matmul(index_feats, query_feat[:, None]).flatten()\n",
    "            _, indices = torch.topk(sim, 1)\n",
    "            probe_tag = index_tags[indices[0]]\n",
    "            probe_tags.append(probe_tag)\n",
    "            probe_scores.append(sim[indices[0]])\n",
    "\n",
    "    return probe_tags, probe_scores\n",
    "\n",
    "def get_probe_tags_avg(probe_feats, ref_info):\n",
    "    \"\"\"Predict one global id per probe using class-mean reference features.\n",
    "\n",
    "    Builds an L2-normalised mean feature per global id (sorted id order), then\n",
    "    assigns each probe the index of its most similar class mean.\n",
    "    Returns a list with one predicted id per probe.\n",
    "    \"\"\"\n",
    "    globalid2refindex = ref_info['globalid2refindex']\n",
    "    ref_feats = ref_info['ref_feats']\n",
    "    category_mean_feats = []\n",
    "    for globalid in sorted(globalid2refindex.keys()):\n",
    "        refindexes = globalid2refindex[globalid]\n",
    "        same_id_feats = ref_feats[refindexes]\n",
    "        same_id_feats = torch.mean(same_id_feats, dim=0, keepdim=True)\n",
    "        same_id_feats = same_id_feats / torch.norm(same_id_feats, 2, 1)\n",
    "        category_mean_feats.append(same_id_feats)\n",
    "    category_mean_feats = torch.cat(category_mean_feats, dim=0)\n",
    "    probe_tags = []\n",
    "\n",
    "    print(f'computing probe tags, total probes:{probe_feats.shape[0]}, refs: {ref_feats.shape[0]}')\n",
    "    for probe_index, query_feat in enumerate(tqdm(probe_feats)):\n",
    "        ref_sim = torch.matmul(category_mean_feats, query_feat[:, None]).flatten()\n",
    "        # NOTE(review): VOTE_NUM candidates are fetched but only the top-1 is\n",
    "        # used here -- the predicted id is the argmax over class means.\n",
    "        _, ref_indices = torch.topk(ref_sim, setting['VOTE_NUM'])\n",
    "        pred_global_id = ref_indices[0]\n",
    "        probe_tags.append(pred_global_id)\n",
    "\n",
    "    return probe_tags\n",
    "\n",
    "\n",
    "def get_probe_tags_topk(probe_feats, ref_info):\n",
    "    \"\"\"Get topk probe tags in gpu tensor.\n",
    "\n",
    "    For each probe, takes its VOTE_NUM nearest references (via kr_rerank_fast)\n",
    "    and accumulates vote scores per unique global id and per unique location.\n",
    "    Returns four parallel lists (one entry per probe): unique candidate ids,\n",
    "    their normalised vote scores, unique locations, and their scores.\n",
    "    \"\"\"\n",
    "\n",
    "    ref_gbid = ref_info['ref_gbid']\n",
    "    ref_loc = ref_info['ref_loc']\n",
    "    ref_feats = ref_info['ref_feats']\n",
    "    probe_tags = []\n",
    "    probe_tag_scores = []\n",
    "    probe_locs = []\n",
    "    probe_loc_scores = []\n",
    "    print(f'computing probe tags, total probes:{probe_feats.shape[0]}, refs: {ref_feats.shape[0]}')\n",
    "    for probe_index, query_feat in enumerate(tqdm(probe_feats)):\n",
    "        # ref_sim = torch.matmul(ref_feats, query_feat[:, None]).flatten()\n",
    "        ref_sim = kr_rerank_fast(query_feat[None, :], ref_feats).flatten()\n",
    "        _, ref_indices = torch.topk(ref_sim, setting['VOTE_NUM'])\n",
    "\n",
    "        id_list = ref_gbid[ref_indices]\n",
    "        loc_list = ref_loc[ref_indices]\n",
    "        score_list = ref_sim[ref_indices]\n",
    "        unique_id_list = torch.unique(id_list)\n",
    "        unique_loc_list = torch.unique(loc_list)\n",
    "        id_score_list = []\n",
    "        loc_score_list = []\n",
    "        # Sum the vote scores per unique id and per unique location.\n",
    "        for item in unique_id_list:\n",
    "            indexes = torch.where(id_list == item)[0]\n",
    "            sum_score = torch.sum(score_list[indexes])\n",
    "            id_score_list.append(sum_score)\n",
    "        for item in unique_loc_list:\n",
    "            indexes = torch.where(loc_list == item)[0]\n",
    "            sum_score = torch.sum(score_list[indexes])\n",
    "            loc_score_list.append(sum_score)\n",
    "\n",
    "        # Normalise each vote mass to sum to 1.\n",
    "        id_score_list = torch.tensor(id_score_list)\n",
    "        id_score_list = id_score_list / torch.sum(id_score_list)\n",
    "\n",
    "        loc_score_list = torch.tensor(loc_score_list)\n",
    "        loc_score_list = loc_score_list / torch.sum(loc_score_list)\n",
    "\n",
    "        probe_tags.append(unique_id_list)\n",
    "        probe_tag_scores.append(id_score_list)\n",
    "        probe_locs.append(unique_loc_list)\n",
    "        probe_loc_scores.append(loc_score_list)\n",
    "\n",
    "    return probe_tags, probe_tag_scores, probe_locs, probe_loc_scores\n",
    "\n",
    "def get_probe_tags(probe_feats, ref_info):\n",
    "    \"\"\"Predict one global id per probe by top-VOTE_NUM voting over references.\n",
    "\n",
    "    Returns (probe_tags, probe_scores): the winning global id and its summed\n",
    "    vote score for each probe.\n",
    "    \"\"\"\n",
    "    ref_gbid = []\n",
    "    refindex2globalid = ref_info['refindex2globalid']\n",
    "    for refindex in refindex2globalid:\n",
    "        ref_gbid.append(refindex2globalid[refindex])\n",
    "    ref_gbid = torch.tensor(ref_gbid).cuda()\n",
    "\n",
    "    ref_feats = ref_info['ref_feats']\n",
    "    probe_tags = []\n",
    "    probe_scores = []\n",
    "\n",
    "    print(f'computing probe tags, total probes:{probe_feats.shape[0]}, refs: {ref_feats.shape[0]}')\n",
    "    for probe_index, query_feat in enumerate(tqdm(probe_feats)):\n",
    "        ref_sim = torch.matmul(ref_feats, query_feat[:, None]).flatten()\n",
    "        # ref_sim = kr_rerank_fast(query_feat[None, :], ref_feats).flatten()\n",
    "        _, ref_indices = torch.topk(ref_sim, setting['VOTE_NUM'])\n",
    "\n",
    "        pred_id_list = []\n",
    "        pred_score_list = []\n",
    "        for ref_index in ref_indices:\n",
    "            pred_score = ref_sim[ref_index]\n",
    "            pred_global_id = ref_gbid[ref_index]\n",
    "            pred_id_list.append(pred_global_id)\n",
    "            pred_score_list.append(pred_score)\n",
    "        pred_id_list = torch.tensor(pred_id_list)\n",
    "        pred_score_list = torch.tensor(pred_score_list)\n",
    "\n",
    "        if len(torch.unique(pred_id_list)) == 1:\n",
    "            # This is often the case\n",
    "            # All VOTE_NUM neighbours agree: take that id directly.\n",
    "            pred_global_id = pred_id_list[0]\n",
    "            score = torch.sum(pred_score_list)\n",
    "        else:\n",
    "            # Otherwise sum vote scores per candidate id and keep the best.\n",
    "            unique_id_list = torch.unique(pred_id_list)\n",
    "            unique_score_list = []\n",
    "            for item in unique_id_list:\n",
    "                indexes = torch.where(pred_id_list == item)[0]\n",
    "                sum_score = torch.sum(pred_score_list[indexes])\n",
    "                unique_score_list.append(sum_score)\n",
    "            unique_score_list = torch.tensor(unique_score_list)\n",
    "            best_index = torch.argmax(unique_score_list)\n",
    "            pred_global_id = unique_id_list[best_index]\n",
    "            score = unique_score_list[best_index]\n",
    "        probe_tags.append(pred_global_id)\n",
    "        probe_scores.append(score)\n",
    "    return probe_tags, probe_scores\n",
    "\n",
    "\n",
    "def get_index_tags(index_feats, ref_info, batch_size=128):\n",
    "    \"\"\"Label every index image with its nearest reference id and location.\n",
    "\n",
    "    Args:\n",
    "        index_feats: (N, D) index features (torch tensor).\n",
    "        ref_info: dict holding 'ref_feats' (R, D), 'ref_gbid' (R,), 'ref_loc' (R,).\n",
    "        batch_size: number of index rows matched against the refs per step.\n",
    "    Returns:\n",
    "        (index_gbid, index_locs): two (N,) tensors with the top-1 reference\n",
    "        global id and location for every index image.\n",
    "    \"\"\"\n",
    "\n",
    "    ref_feats = ref_info['ref_feats']\n",
    "    ref_gbid = ref_info['ref_gbid']\n",
    "    ref_loc = ref_info['ref_loc']\n",
    "\n",
    "    print(f'computing index tags, total index:{index_feats.shape[0]}, refs: {ref_feats.shape[0]}')\n",
    "    # Ceil division. The previous `len / batch_size + 1` formula produced a\n",
    "    # spurious empty final batch whenever len(index_feats) was an exact\n",
    "    # multiple of batch_size. max(..., 1) keeps the empty-input behaviour.\n",
    "    num_batches = max((len(index_feats) + batch_size - 1) // batch_size, 1)\n",
    "    index_gbid = []\n",
    "    index_locs = []\n",
    "    for batch_idx in tqdm(range(num_batches)):\n",
    "        batch_data = index_feats[batch_idx*batch_size:(batch_idx+1)*batch_size]\n",
    "        ref_sim = torch.matmul(batch_data, ref_feats.T)\n",
    "        _, ref_indices = torch.topk(ref_sim, 1, dim=1)\n",
    "        ref_indices = ref_indices.flatten()    # (batch_size, )\n",
    "        pred_global_id = ref_gbid[ref_indices]    # (batch_size, )\n",
    "        pred_loc = ref_loc[ref_indices]\n",
    "        index_gbid.append(pred_global_id)\n",
    "        index_locs.append(pred_loc)\n",
    "    index_gbid = torch.cat(index_gbid)    # (num_index, )\n",
    "    index_locs = torch.cat(index_locs)\n",
    "\n",
    "    return index_gbid, index_locs\n",
    "\n",
    "def rerank_tag_and_loc(sim, probe_tags, probe_locs, probe_tag_scores, probe_loc_scores, index_tags, index_locs, alpha=1.0, beta=1.0):\n",
    "    \"\"\"Boost `sim` where index tags/locations agree with the probe's votes.\n",
    "\n",
    "    Every predicted tag adds `alpha * its score` to all matching index\n",
    "    entries; every predicted location adds `beta * its score`.\n",
    "    Returns the updated similarity vector.\n",
    "    \"\"\"\n",
    "    # Tag-based boost.\n",
    "    for tag, tag_score in zip(probe_tags, probe_tag_scores):\n",
    "        matching = torch.where(index_tags == tag)\n",
    "        sim = recomputing_sim(sim, matching, tag_score, alpha)\n",
    "\n",
    "    # Location-based boost.\n",
    "    for loc, loc_score in zip(probe_locs, probe_loc_scores):\n",
    "        matching = torch.where(index_locs == loc)\n",
    "        sim = recomputing_sim(sim, matching, loc_score, beta)\n",
    "\n",
    "    return sim\n",
    "\n",
    "def recomputing_sim(sim, indexes, score, weight):\n",
    "    \"\"\"Add `weight * score` to the selected entries of `sim` and return it.\"\"\"\n",
    "    boost = weight * score\n",
    "    sim[indexes] = sim[indexes] + boost\n",
    "    return sim\n",
    "\n",
    "def category_rerank_after_merge(sims, probe_tags, probe_locs, probe_tag_scores, probe_loc_scores, index_tags, index_locs, sim_thr=0.1, alpha=1.0, beta=0.1):\n",
    "    \"\"\"Rerank an already-merged similarity matrix with tag/location votes.\n",
    "\n",
    "    Delegates the per-probe boosting to rerank_tag_and_loc and returns a new\n",
    "    matrix with the same shape as `sims`.\n",
    "    \"\"\"\n",
    "\n",
    "    print('Category Reranking after merge......')\n",
    "    print(f'Category Thr is {sim_thr}')\n",
    "    rerank_sims = torch.zeros_like(sims)\n",
    "    print(f'rerank sims by {alpha}, {beta}')\n",
    "    per_probe_votes = zip(probe_tags, probe_locs, probe_tag_scores, probe_loc_scores)\n",
    "    for row, (tags, locs, tag_scores, loc_scores) in enumerate(tqdm(per_probe_votes)):\n",
    "        flat_sim = sims[row].flatten()\n",
    "        rerank_sims[row] = rerank_tag_and_loc(flat_sim,\n",
    "                                              tags, locs, tag_scores, loc_scores,\n",
    "                                              index_tags, index_locs,\n",
    "                                              alpha=alpha, beta=beta)\n",
    "    return rerank_sims\n",
    "\n",
    "\n",
    "def category_rerank_before_merge(probe_feats, index_feats, ref_info):\n",
    "    \"\"\"Category rerank applied per model, before the ensemble merge.\n",
    "\n",
    "    Labels every index image with its nearest reference id, votes a global id\n",
    "    for each probe (top VOTE_NUM refs), then moves index images sharing the\n",
    "    probe's id to the front of the ranking. Returns a (P, N) float32 array of\n",
    "    dummy similarities that encodes only the new ordering.\n",
    "    \"\"\"\n",
    "\n",
    "    print('Category Reranking before merge......')\n",
    "    ref_feats = ref_info['ref_feats']\n",
    "    print('ref, ', ref_feats.shape)\n",
    "    rerank_sims = np.zeros((len(probe_feats), len(index_feats)),\n",
    "                           dtype=np.float32)\n",
    "    index_gbid = []\n",
    "    ref_gbid = []\n",
    "    refindex2globalid = ref_info['refindex2globalid']\n",
    "    for refindex in refindex2globalid:\n",
    "        ref_gbid.append(refindex2globalid[refindex])\n",
    "    ref_gbid = torch.tensor(ref_gbid).cuda()\n",
    "    print('Get label for each index image')\n",
    "    batch_size = 128\n",
    "    num_batches = len(index_feats) / batch_size + 1\n",
    "    # NOTE(review): the formula above yields one extra empty batch whenever the\n",
    "    # length divides batch_size evenly; the empty ops are harmless, but ceil\n",
    "    # division would be cleaner.\n",
    "    num_batches = int(num_batches)\n",
    "    for batch_idx in tqdm(range(num_batches)):\n",
    "        batch_data = index_feats[batch_idx*batch_size:(batch_idx+1)*batch_size]\n",
    "        ref_sim = torch.matmul(batch_data, ref_feats.T)\n",
    "        _, ref_indices = torch.topk(ref_sim, 1, dim=1)\n",
    "        ref_indices = ref_indices.flatten()    # (batch_size, )\n",
    "        pred_global_id = ref_gbid[ref_indices]    # (batch_size, )\n",
    "        index_gbid.append(pred_global_id)\n",
    "    index_gbid = torch.cat(index_gbid)    # (num_index, )\n",
    "\n",
    "    # Vote a category per probe, then push matching index images to the front.\n",
    "    for probe_index, query_feat in enumerate(tqdm(probe_feats)):\n",
    "        ref_sim = torch.matmul(ref_feats, query_feat[:, None]).flatten()\n",
    "        _, ref_indices = torch.topk(ref_sim, setting['VOTE_NUM'])\n",
    "\n",
    "        pred_id_list = []\n",
    "        pred_score_list = []\n",
    "        for ref_index in ref_indices:\n",
    "            pred_score = ref_sim[ref_index]\n",
    "            pred_global_id = ref_gbid[ref_index]\n",
    "            pred_id_list.append(pred_global_id)\n",
    "            pred_score_list.append(pred_score)\n",
    "        pred_id_list = torch.tensor(pred_id_list)\n",
    "        pred_score_list = torch.tensor(pred_score_list)\n",
    "\n",
    "        if len(torch.unique(pred_id_list)) == 1:\n",
    "            # This is often the case\n",
    "            pred_global_id = pred_id_list[0]\n",
    "        else:\n",
    "            # Score-weighted vote among the disagreeing candidates.\n",
    "            unique_id_list = torch.unique(pred_id_list)\n",
    "            unique_score_list = []\n",
    "            for item in unique_id_list:\n",
    "                indexes = torch.where(pred_id_list == item)[0]\n",
    "                sum_score = torch.sum(pred_score_list[indexes])\n",
    "                unique_score_list.append(sum_score)\n",
    "            unique_score_list = torch.tensor(unique_score_list)\n",
    "            best_index = torch.argmax(unique_score_list)\n",
    "            pred_global_id = unique_id_list[best_index]\n",
    "            # print(pred_id_list, pred_score_list, pred_global_id)\n",
    "\n",
    "        raw_sim = torch.matmul(index_feats, query_feat[:, None]).flatten()\n",
    "        raw_orders = torch.argsort(-raw_sim)\n",
    "        raw_orders = raw_orders.cpu().numpy()\n",
    "        good_indexes = torch.where(index_gbid == pred_global_id)[0]\n",
    "        good_indexes = good_indexes.cpu().numpy()\n",
    "        match_indexes = np.in1d(raw_orders, good_indexes)\n",
    "        pos_list = list(raw_orders[match_indexes])\n",
    "        neg_list = list(raw_orders[~match_indexes])\n",
    "        #pos_list = list(good_indexes)\n",
    "        #neg_list = list(np.arange(index_feats.shape[0])[~np.in1d(np.arange(index_feats.shape[0]), good_indexes)])\n",
    "        # Encode only the ordering: evenly spaced descending dummy similarities.\n",
    "        merged_list = pos_list + neg_list\n",
    "        dummpy_sim = np.arange(len(merged_list)) / float(len(merged_list))\n",
    "        dummpy_sim = 1.0 - dummpy_sim\n",
    "        rerank_sims[probe_index, merged_list] = dummpy_sim\n",
    "    return rerank_sims\n",
    "\n",
    "\n",
    "def category_expansion(probe_feats, index_feats, ref_info):\n",
    "    \"\"\"Category query expansion.\n",
    "\n",
    "    For each probe: find its VOTE_NUM nearest references, gather all reference\n",
    "    images of those categories, mark index images that any of them matches\n",
    "    above 0.6 similarity, and move those to the front of the probe's ranking.\n",
    "    Returns a (P, N) float32 array of dummy similarities encoding the order.\n",
    "    \"\"\"\n",
    "\n",
    "    ref_feats = ref_info['ref_feats']\n",
    "    rerank_sims = np.zeros((len(probe_feats), len(index_feats)),\n",
    "                           dtype=np.float32)\n",
    "    for probe_index, query_feat in enumerate(tqdm(probe_feats)):\n",
    "        # NOTE(review): these two lookups are loop-invariant and could be\n",
    "        # hoisted above the loop.\n",
    "        globalid2refindex = ref_info['globalid2refindex']\n",
    "        refindex2globalid = ref_info['refindex2globalid']\n",
    "        ref_sim = torch.matmul(ref_feats, query_feat[:, None]).flatten()\n",
    "        _, ref_indices = torch.topk(ref_sim, setting['VOTE_NUM'])\n",
    "        ref_indices = ref_indices.cpu().numpy()\n",
    "        # Collect every reference image of each top candidate's category.\n",
    "        same_cat_indexes = []\n",
    "        for ref_index in ref_indices:\n",
    "            pred_global_id = refindex2globalid[ref_index]\n",
    "            same_cat_indexes.append(globalid2refindex[pred_global_id])\n",
    "        same_cat_indexes = np.concatenate(same_cat_indexes)\n",
    "        same_cat_indexes = torch.tensor(same_cat_indexes)\n",
    "        # print(same_cat_indexes)\n",
    "        cat_feats = ref_feats[same_cat_indexes]\n",
    "        # print(f'{len(cat_feats)} ref images with same cat, {cat_feats.shape}')\n",
    "        cat2pred_sim = torch.matmul(cat_feats, index_feats.T)    # (C, index_num)\n",
    "        cat2pred_sim, _ = torch.max(cat2pred_sim, dim=0)    # (index_num, )\n",
    "        # print(cat2pred_sim.shape, cat2pred_sim.max())\n",
    "        # Index images matched by any category image above this threshold.\n",
    "        good_indexes = (cat2pred_sim > 0.6).nonzero()\n",
    "        good_indexes = good_indexes.cpu().numpy().flatten()\n",
    "        # back to index_name_list\n",
    "        # good_names = index_name_list[good_indexes]\n",
    "        # print(f'good names: {good_names}')\n",
    "\n",
    "        # 2019 GLR retrieval rerank\n",
    "        raw_sim = torch.matmul(index_feats, query_feat[:, None]).flatten()\n",
    "        raw_orders = torch.argsort(-raw_sim)\n",
    "        raw_orders = raw_orders.cpu().numpy()\n",
    "        match_indexes = np.in1d(raw_orders, good_indexes)\n",
    "        pos_list = list(raw_orders[match_indexes])\n",
    "        neg_list = list(raw_orders[~match_indexes])\n",
    "\n",
    "        merged_list = pos_list + neg_list\n",
    "        # merged_list = list(raw_orders)\n",
    "        dummpy_sim = np.arange(len(merged_list)) / float(len(merged_list))\n",
    "        dummpy_sim = 1.0 - dummpy_sim\n",
    "        rerank_sims[probe_index, merged_list] = dummpy_sim\n",
    "        \"\"\"\n",
    "        # simply query expansion max sims.\n",
    "        # rerank_sims[probe_index] = cat2pred_sim.cpu().numpy()\n",
    "\n",
    "        # QE average query features\n",
    "        # qe_feats = ref_feats[same_cat_indexes]\n",
    "        # qe_feats = torch.mean(qe_feats, dim=0, keepdim=True)    # (1, 512)\n",
    "        # qe_feats = qe_feats / torch.norm(qe_feats, 2, 1)\n",
    "        # qe2index_sim = torch.matmul(qe_feats, index_feats.T)    # (1, index_num)\n",
    "        # rerank_sims[probe_index] = qe2index_sim.flatten().cpu().numpy()\n",
    "        \"\"\"\n",
    "    return rerank_sims\n",
    "\n",
    "\n",
    "def rerank_local_matching(spg_model, num_inliers_dict, probe_name, probe_dir, index_name_list, index_dir, sims, local_weight, max_inliers, cache_dir, do_cache, ignore_global_score=False):\n",
    "    \"\"\"Re-order one probe's candidate list via SuperPoint+SuperGlue matching.\n",
    "\n",
    "    Args:\n",
    "        spg_model: loaded SuperPoint/SuperGlue model.\n",
    "        num_inliers_dict: (probe, index) -> inlier-count cache; updated in place.\n",
    "        probe_name / probe_dir: probe image id and its root directory.\n",
    "        index_name_list / index_dir: candidate image ids and their root dir.\n",
    "        sims: global similarity per candidate, aligned with index_name_list.\n",
    "        local_weight, max_inliers: blending params passed to get_total_score.\n",
    "        cache_dir, do_cache: where/whether keypoint results may be cached.\n",
    "        ignore_global_score: if True, rank by the local-matching score alone.\n",
    "    Returns:\n",
    "        index_name_list re-sorted by the combined score, best first.\n",
    "    \"\"\"\n",
    "    if do_cache:\n",
    "        if not os.path.exists(cache_dir):\n",
    "            os.makedirs(cache_dir)\n",
    "\n",
    "    # GLR layout: images are nested under dirs named by the id's first 3 chars.\n",
    "    probe_path = f'{probe_dir}/{probe_name[0]}/{probe_name[1]}/{probe_name[2]}/{probe_name}.jpg'\n",
    "    scores = []\n",
    "    probe_image_cache = {}\n",
    "    for idx, index_name in enumerate(index_name_list):\n",
    "        index_path = f'{index_dir}/{index_name[0]}/{index_name[1]}/{index_name[2]}/{index_name}.jpg'\n",
    "\n",
    "        if (probe_name, index_name) not in num_inliers_dict:\n",
    "            # Cache miss: extract keypoints, match, and count inliers.\n",
    "            pred, _, _ = generate_superpoint_superglue(probe_path, probe_name, index_path, index_name,\n",
    "                                                 spg_model, cache_dir, False, probe_image_cache)\n",
    "            num_inliers = get_num_inliers(pred)\n",
    "            num_inliers_dict[(probe_name, index_name)] = num_inliers\n",
    "        else:\n",
    "            num_inliers = num_inliers_dict.get((probe_name, index_name))\n",
    "        if ignore_global_score:\n",
    "            total_score = get_total_score(num_inliers, 0.)\n",
    "        else:\n",
    "            total_score = get_total_score(num_inliers, sims[idx], weight=local_weight, max_inlier_score=max_inliers)\n",
    "        scores.append(total_score)\n",
    "\n",
    "    #if do_cache:\n",
    "    #    save_whole_cached_num_inliers(cache_dir, num_inliers_dict)\n",
    "    scores = np.asarray(scores)\n",
    "    rerank_sort = np.argsort(scores)[::-1]\n",
    "    return index_name_list[rerank_sort]\n",
    "\n",
    "\n",
    "\n",
    "def write_csv(probe_name_list, index_name_list, sims):\n",
    "    \"\"\"Write csv files for submission.\n",
    "\n",
    "    For every probe, index images are ranked by global similarity; when\n",
    "    LOCAL_MATCHING == 'spg' the top SPG_RERANK_NUM candidates are reranked\n",
    "    with SuperPoint+SuperGlue local matching. The top-100 ids per probe\n",
    "    are written to submission.csv.\n",
    "    \"\"\"\n",
    "\n",
    "    if setting['LOCAL_MATCHING'] == 'spg':\n",
    "        spg_model = load_superpointglue_model(setting['SPG_MODEL_DIR'])\n",
    "        num_inliers_dict = get_whole_cached_num_inliers(setting['SPG_CACHE_DIR'])\n",
    "        rerank_num = setting['SPG_RERANK_NUM']\n",
    "    index_name_list = np.array(index_name_list)\n",
    "    id_list = []\n",
    "    res_list = []\n",
    "    print('Start output csv files')\n",
    "    for probe_index, probe_name in enumerate(tqdm(probe_name_list)):\n",
    "        id_list.append(probe_name)\n",
    "        sim = sims[probe_index]\n",
    "        orders = np.argsort(-sim)\n",
    "        if setting['LOCAL_MATCHING'] == 'spg':\n",
    "            # Rerank only the top-k locally; keep the global order for the\n",
    "            # remainder of the top-100.\n",
    "            sorted_name_list_topk = rerank_local_matching(spg_model, num_inliers_dict,\n",
    "                                                        probe_name, setting['PROBE_DIR'],\n",
    "                                                        index_name_list[orders[:rerank_num]], setting['INDEX_DIR'],\n",
    "                                                        sim[orders[:rerank_num]], setting['LOCAL_WEIGHT'], setting['MAX_INLIERS'],\n",
    "                                                        cache_dir=setting['SPG_CACHE_DIR'], do_cache=setting['SPG_DO_CACHE'])\n",
    "            sorted_name_list = sorted_name_list_topk.tolist() + index_name_list[orders[rerank_num:100]].tolist()\n",
    "        else:\n",
    "            sorted_name_list = index_name_list[orders[:100]]\n",
    "        res_list.append(' '.join(sorted_name_list))\n",
    "    if setting['LOCAL_MATCHING'] == 'spg' and setting['SPG_DO_CACHE']:\n",
    "        # Persist newly computed inlier counts once, after all probes.\n",
    "        save_whole_cached_num_inliers(setting['SPG_CACHE_DIR'], num_inliers_dict)\n",
    "    # pylint: disable=invalid-name\n",
    "    df = pd.DataFrame({'id': id_list, 'images': res_list})\n",
    "    df.to_csv('submission.csv', index=False)\n",
    "    print('Finish output csv files')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "7d3f980d",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2021-10-01T18:45:32.641626Z",
     "iopub.status.busy": "2021-10-01T18:45:32.630891Z",
     "iopub.status.idle": "2021-10-01T18:45:34.969369Z",
     "shell.execute_reply": "2021-10-01T18:45:34.969920Z"
    },
    "papermill": {
     "duration": 2.353605,
     "end_time": "2021-10-01T18:45:34.970118",
     "exception": false,
     "start_time": "2021-10-01T18:45:32.616513",
     "status": "completed"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Total 1 gpu cards with 17071734784 memory\n",
      "Init Memory usage: 5.3\n",
      "load image Memory usage: 5.3\n",
      "Dummy submission!\n"
     ]
    }
   ],
   "source": [
    "\"\"\"Base code for ILR2021\"\"\"\n",
    "\n",
    "import gc\n",
    "import gzip\n",
    "import os\n",
    "import pickle\n",
    "import shutil\n",
    "import sys\n",
    "import time\n",
    "\n",
    "import cv2\n",
    "import psutil\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "from torch.cuda import amp\n",
    "from torch.utils.data import Dataset\n",
    "from torch.utils.data import DataLoader\n",
    "from tqdm import tqdm\n",
    "import numpy as np\n",
    "from collections import OrderedDict\n",
    "\n",
    "sys.path.append('../input/util-code/')\n",
    "from make_model import make_model\n",
    "\n",
    "# pylint: disable=not-callable, invalid-name, line-too-long\n",
    "# ########################## Cell 1 Basic module test ########################\n",
    "\n",
    "# Probe CUDA availability; `gpu_num` is read by later cells (batch size,\n",
    "# DataParallel), so this cell must run before any inference.\n",
    "try:\n",
    "    assert torch.cuda.is_available()\n",
    "    gpu_num = torch.cuda.device_count()\n",
    "    assert gpu_num > 0\n",
    "    gpu_memory = torch.cuda.get_device_properties(0).total_memory\n",
    "    print(f'Total {gpu_num} gpu cards with {gpu_memory} memory')\n",
    "except AssertionError:\n",
    "    # NOTE(review): `gpu_num` is left undefined on this path; later code\n",
    "    # that reads it would raise NameError -- confirm this is acceptable.\n",
    "    print('Fail to set gpu')\n",
    "\n",
    "# ########################## Cell 3 Load all image list  #####################\n",
    "\n",
    "\n",
    "def load_image_list():\n",
    "    \"\"\"Walk PROBE_DIR then INDEX_DIR and collect all image paths.\n",
    "\n",
    "    Returns:\n",
    "        (all_image_list, query_count): probe paths followed by index paths,\n",
    "        and the number of probe images -- or (None, None), see below.\n",
    "    \"\"\"\n",
    "\n",
    "    query_count = 0\n",
    "    index_count = 0\n",
    "    all_image_list = []\n",
    "    for dirname, _, filenames in os.walk(setting['PROBE_DIR']):\n",
    "        for filename in filenames:\n",
    "            query_count += 1\n",
    "            all_image_list.append(os.path.join(dirname, filename))\n",
    "            # print(os.path.join(dirname, filename))\n",
    "    # NOTE(review): presumably this detects a run against the public sample\n",
    "    # set (SAMPLE_TEST_NUM probes on one GPU) and tells the caller to emit\n",
    "    # a dummy submission instead of doing real work -- confirm with caller.\n",
    "    if query_count == setting['SAMPLE_TEST_NUM'] and gpu_num == 1:\n",
    "        return None, None\n",
    "    for dirname, _, filenames in os.walk(setting['INDEX_DIR']):\n",
    "        for filename in filenames:\n",
    "            index_count += 1\n",
    "            all_image_list.append(os.path.join(dirname, filename))\n",
    "            # print(os.path.join(dirname, filename))\n",
    "    print(f'query num: {query_count} and index num: {index_count}')\n",
    "    return all_image_list, query_count\n",
    "\n",
    "# ########################## Cell 4 ReID inference  ##########################\n",
    "\n",
    "\n",
    "class ImageDataset(Dataset):\n",
    "    \"\"\"Dataset yielding (image tensor, image path) pairs.\n",
    "\n",
    "    Images are loaded with OpenCV, resized to a square of side\n",
    "    setting['IMAGE_SIZE'] and reordered from BGR to RGB channels.\n",
    "    The ``transforms`` argument is accepted for interface parity but unused.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, dataset, transforms):\n",
    "        _ = transforms  # intentionally ignored\n",
    "        self.dataset = dataset\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.dataset)\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        path = self.dataset[index]\n",
    "        side = setting['IMAGE_SIZE']\n",
    "        bgr = cv2.resize(cv2.imread(path), (side, side))\n",
    "        rgb = torch.tensor(bgr)[:, :, [2, 1, 0]]  # BGR -> RGB\n",
    "        return rgb, path\n",
    "\n",
    "\n",
    "def val_collate_fn(batch):\n",
    "    \"\"\"Val collate fn.\"\"\"\n",
    "\n",
    "    imgs, img_paths = zip(*batch)\n",
    "    return torch.stack(imgs, dim=0), img_paths\n",
    "\n",
    "\n",
    "class ReID_Inference:\n",
    "    \"\"\"Batched feature extractor wrapping a retrieval backbone.\n",
    "\n",
    "    Loads a checkpoint named by setting['MODEL_PARAMS'][backbone], moves the\n",
    "    model to GPU (DataParallel when several GPUs are visible) and normalizes\n",
    "    inputs with fixed per-channel mean/std on the 0-255 pixel scale.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, backbone):\n",
    "\n",
    "        # `backbone` keys into MODEL_PARAMS for both the architecture and\n",
    "        # the checkpoint filename.\n",
    "        self.model = make_model(setting['MODEL_PARAMS'][backbone]['BACKBONE'])\n",
    "        model_name = setting['MODEL_PARAMS'][backbone]['MODEL_NAME']\n",
    "        model_path = os.path.join(setting['MODEL_DIR'], model_name)\n",
    "        self.model.load_param(model_path)\n",
    "        self.batch_size = setting['BATCH_SIZE']\n",
    "        if gpu_num > 1:\n",
    "            print(f'Using {gpu_num} gpu for inference')\n",
    "            self.model = nn.DataParallel(self.model)\n",
    "            # Scale the total batch so each card keeps its configured load.\n",
    "            self.batch_size = setting['BATCH_SIZE'] * gpu_num\n",
    "        self.model.to('cuda')\n",
    "        self.model.eval()\n",
    "        # Fixed per-channel normalization constants (0-255 scale).\n",
    "        self.mean = torch.tensor([123.675, 116.280, 103.530]).to('cuda')\n",
    "        self.std = torch.tensor([57.0, 57.0, 57.0]).to('cuda')\n",
    "\n",
    "    def extract(self, imgpath_list):\n",
    "        \"\"\"Extract L2-normalized features for a list of image paths.\n",
    "\n",
    "        Returns:\n",
    "            OrderedDict mapping image path -> feature vector (numpy array),\n",
    "            in DataLoader iteration order.\n",
    "        \"\"\"\n",
    "\n",
    "        val_set = ImageDataset(imgpath_list, None)\n",
    "\n",
    "        # NOTE: no pin_memory to save memory\n",
    "        if gpu_num > 1:\n",
    "            pin_memory = True\n",
    "            num_workers = 32\n",
    "        else:\n",
    "            pin_memory = False\n",
    "            num_workers = 2\n",
    "        val_loader = DataLoader(\n",
    "            val_set, batch_size=self.batch_size, shuffle=False,\n",
    "            num_workers=num_workers, collate_fn=val_collate_fn,\n",
    "            pin_memory=pin_memory\n",
    "        )\n",
    "\n",
    "        batch_res_dic = OrderedDict()\n",
    "        for (batch_data, batch_path) in tqdm(val_loader,\n",
    "                                             total=len(val_loader)):\n",
    "            with torch.no_grad():\n",
    "                batch_data = batch_data.to('cuda')\n",
    "                batch_data = (batch_data - self.mean) / self.std\n",
    "                batch_data = batch_data.permute(0, 3, 1, 2)  # NHWC -> NCHW\n",
    "                batch_data = batch_data.float()\n",
    "                if not setting['MULTI_SCALE_FEAT']:\n",
    "                    if setting['FP16']:\n",
    "                        # NOTE: DO NOT use model.half() because of underflow\n",
    "                        with amp.autocast():\n",
    "                            feat = self.model(batch_data)\n",
    "                    else:\n",
    "                        feat = self.model(batch_data)\n",
    "                else:\n",
    "                    # Average features over three input scales, each size\n",
    "                    # rounded down to a multiple of 16.\n",
    "                    # Ref: https://github.com/lyakaap/Landmark2019-1st-and-3rd-Place-Solution/blob/3fee857dd2b2927ede70c43bffd99b41eb394507/cirtorch/networks/imageretrievalnet.py#L309\n",
    "                    feat = torch.zeros((len(batch_data), 512),\n",
    "                                       dtype=torch.float16).cuda()\n",
    "                    raw_size = batch_data.shape[2]\n",
    "                    for s in [0.707, 1.0, 1.414]:\n",
    "                        new_size = int(((raw_size * s) // 16) * 16)\n",
    "                        scale_data = nn.functional.interpolate(\n",
    "                                batch_data, size=new_size, mode='bilinear',\n",
    "                                align_corners=False)\n",
    "                        with amp.autocast():\n",
    "                            scale_feat = self.model(scale_data)\n",
    "                        feat += scale_feat\n",
    "                    feat = feat/3.0\n",
    "                feat = feat / torch.norm(feat, 2, 1, keepdim=True)  # L2 norm\n",
    "                feat = feat.cpu().detach().numpy()\n",
    "\n",
    "            for index, imgpath in enumerate(batch_path):\n",
    "                batch_res_dic[imgpath] = feat[index]\n",
    "        # Drop loader state explicitly; kernel memory persists across cells.\n",
    "        del val_loader, val_set, feat, batch_data\n",
    "        return batch_res_dic\n",
    "\n",
    "\n",
    "def debug_reid_inference(image_list):\n",
    "    \"\"\"Smoke-test R50 feature extraction on the first 20 images.\"\"\"\n",
    "\n",
    "    extractor = ReID_Inference('R50')\n",
    "    features = extractor.extract(image_list[:20])\n",
    "    print(features)\n",
    "    del extractor, features\n",
    "\n",
    "# ########################## Cell 5 Extract feature  #########################\n",
    "\n",
    "\n",
    "def save_feature(all_feature_dic, backbone):\n",
    "    \"\"\"Split extracted features into probe/index sets and pickle them.\n",
    "\n",
    "    Args:\n",
    "        all_feature_dic: Mapping image path -> feature vector, as returned\n",
    "            by ReID_Inference.extract. Cleared and deleted before returning\n",
    "            to release memory.\n",
    "        backbone: Backbone tag used in the output pickle filenames.\n",
    "    \"\"\"\n",
    "\n",
    "    if not os.path.exists(setting['FEAT_DIR']):\n",
    "        os.makedirs(setting['FEAT_DIR'])\n",
    "    index_name_list, index_feats = [], []\n",
    "    probe_name_list, probe_feats = [], []\n",
    "    # NOTE: attention the order! Related to probe_name_list order\n",
    "    for image_path, sample_feat in sorted(all_feature_dic.items()):\n",
    "        image_name = os.path.basename(image_path).split('.jpg')[0]\n",
    "        # NOTE(review): assumes paths of the form .../<split>/a/b/c/<id>.jpg\n",
    "        # so the 5th component from the end is 'test' for probes -- confirm.\n",
    "        sample_mode = image_path.split('/')[-5]\n",
    "        if sample_mode == 'test':\n",
    "            probe_name_list.append(image_name)\n",
    "            probe_feats.append(sample_feat)\n",
    "        else:\n",
    "            index_name_list.append(image_name)\n",
    "            index_feats.append(sample_feat)\n",
    "\n",
    "    pkl_name = os.path.join(setting['FEAT_DIR'], f'probe_feats_{backbone}.pkl')\n",
    "    probe_dic = {'probe_name_list': np.array(probe_name_list),\n",
    "                 'probe_feats': np.array(probe_feats)}\n",
    "    with open(pkl_name, 'wb') as f_pkl:\n",
    "        pickle.dump(probe_dic, f_pkl, pickle.HIGHEST_PROTOCOL)\n",
    "    print('Save pickle in %s' % pkl_name)\n",
    "    pkl_name = os.path.join(setting['FEAT_DIR'], f'index_feats_{backbone}.pkl')\n",
    "    index_dic = {'index_name_list': np.array(index_name_list),\n",
    "                 'index_feats': np.array(index_feats)}\n",
    "    with open(pkl_name, 'wb') as f_pkl:\n",
    "        pickle.dump(index_dic, f_pkl, pickle.HIGHEST_PROTOCOL)\n",
    "    print('Save pickle in %s' % pkl_name)\n",
    "    # Aggressively drop references; the notebook kernel keeps cell state\n",
    "    # alive, so large intermediates must be freed by hand.\n",
    "    all_feature_dic.clear(), probe_dic.clear(), index_dic.clear()\n",
    "    del all_feature_dic, probe_dic, index_dic, probe_feats, index_feats\n",
    "    del probe_name_list, index_name_list\n",
    "    gc.collect()\n",
    "\n",
    "\n",
    "def load_feat(mode, backbone):\n",
    "    \"\"\"Load precomputed features for one split.\n",
    "\n",
    "    Args:\n",
    "        mode: 'probe' or 'index' -- selects which cached pickle to read.\n",
    "        backbone: Backbone tag embedded in the pickle filename.\n",
    "\n",
    "    Returns:\n",
    "        (name_list, feats) arrays as written by save_feature.\n",
    "    \"\"\"\n",
    "\n",
    "    pkl_path = f\"{setting['FEAT_DIR']}/{mode}_feats_{backbone}.pkl\"\n",
    "    with open(pkl_path, 'rb') as handle:\n",
    "        cached = pickle.load(handle)\n",
    "    print(f'load {backbone} feat, memory : {psutil.virtual_memory().percent}')\n",
    "    return cached[f'{mode}_name_list'], cached[f'{mode}_feats']\n",
    "\n",
    "\n",
    "def save_numpy(data_path, data, save_disk_flag=True):\n",
    "    \"\"\"Save numpy.\"\"\"\n",
    "\n",
    "    if save_disk_flag:\n",
    "        # Save space but slow\n",
    "        f_data = gzip.GzipFile(f\"{data_path}.gz\", \"w\")\n",
    "        np.save(file=f_data, arr=data)\n",
    "        f_data.close()\n",
    "    else:\n",
    "        np.save(data_path, data)\n",
    "\n",
    "\n",
    "def load_numpy(data_path, save_disk_flag=True):\n",
    "    \"\"\"Load numpy.\"\"\"\n",
    "\n",
    "    if save_disk_flag:\n",
    "        # Save space but slow\n",
    "        f_data = gzip.GzipFile(f'{data_path}.gz', \"r\")\n",
    "        data = np.load(f_data)\n",
    "    else:\n",
    "        data = np.load(data_path)\n",
    "    return data\n",
    "\n",
    "\n",
    "# ########################## Cell 6 KR rerank sims  #########################\n",
    "\n",
    "\n",
    "def build_graph(initial_rank):\n",
    "    \"\"\"Build the k-reciprocal graph rows and cache them to disk.\n",
    "\n",
    "    For each sample i, finds its k-reciprocal neighbors (top-K1 neighbors\n",
    "    whose own top-K1 lists contain i), expands the set with sufficiently\n",
    "    overlapping candidate sets, and writes a sparse weight row V (exp(-dist)\n",
    "    normalized to sum 1) to GRAPH_DIST_DIR, one .npy file per sample.\n",
    "\n",
    "    Args:\n",
    "        initial_rank: (all_num, K1+10) int array of nearest-neighbor ids,\n",
    "            produced by cache_expand_sims.\n",
    "    \"\"\"\n",
    "\n",
    "    K1 = setting['K1']\n",
    "    if not os.path.exists(setting['GRAPH_DIST_DIR']):\n",
    "        os.makedirs(setting['GRAPH_DIST_DIR'])\n",
    "\n",
    "    torch.cuda.empty_cache()  # empty GPU memory\n",
    "    gc.collect()\n",
    "    print(f'Start build graph, memory: {psutil.virtual_memory().percent}')\n",
    "    all_num = initial_rank.shape[0]\n",
    "    for i in tqdm(range(all_num)):\n",
    "        original_dist = load_numpy(os.path.join(setting['EUC_DIST_DIR'],\n",
    "                                                f'{i:08d}.npy'),\n",
    "                                   save_disk_flag=False)\n",
    "        V = np.zeros_like(original_dist, dtype=np.float16)\n",
    "        # k-reciprocal neighbors: i's top-K1 whose top-K1 contain i.\n",
    "        forward_k_neigh_index = initial_rank[i, :K1+1]\n",
    "        backward_k_neigh_index = initial_rank[forward_k_neigh_index, :K1+1]\n",
    "        fi = np.where(backward_k_neigh_index == i)[0]\n",
    "        k_reciprocal_index = forward_k_neigh_index[fi]\n",
    "        k_reciprocal_expansion_index = k_reciprocal_index\n",
    "        # Expand with each candidate's (K1/2)-reciprocal set when it\n",
    "        # overlaps the base set by more than 2/3 of its own size.\n",
    "        for j in range(len(k_reciprocal_index)):\n",
    "            candidate = k_reciprocal_index[j]\n",
    "            candidate_forward_k_neigh_index = initial_rank[candidate, :int(np.around(K1/2.))+1]\n",
    "            candidate_backward_k_neigh_index = initial_rank[candidate_forward_k_neigh_index, :int(np.around(K1/2.))+1]\n",
    "            fi_candidate = np.where(candidate_backward_k_neigh_index == candidate)[0]\n",
    "            candidate_k_reciprocal_index = candidate_forward_k_neigh_index[fi_candidate]\n",
    "            if len(np.intersect1d(candidate_k_reciprocal_index, k_reciprocal_index)) > 2./3*len(candidate_k_reciprocal_index):\n",
    "                k_reciprocal_expansion_index = np.append(k_reciprocal_expansion_index, candidate_k_reciprocal_index)\n",
    "\n",
    "        k_reciprocal_expansion_index = np.unique(k_reciprocal_expansion_index)\n",
    "        # exp(-dist) weights over the expansion set, normalized to sum 1.\n",
    "        weight = np.exp(-original_dist[k_reciprocal_expansion_index])\n",
    "        norm_weight = weight/np.sum(weight)\n",
    "        V[k_reciprocal_expansion_index] = norm_weight\n",
    "        save_numpy(os.path.join(setting['GRAPH_DIST_DIR'], f'{i:08d}.npy'), V)\n",
    "    print(f'Finish build graph, memory: {psutil.virtual_memory().percent}')\n",
    "\n",
    "\n",
    "def expand_query(initial_rank):\n",
    "    \"\"\"Average each sample's graph row over its K2 nearest neighbors.\n",
    "\n",
    "    Reads per-sample rows from GRAPH_DIST_DIR and writes the mean row to\n",
    "    QE_DIST_DIR (the query-expansion step of k-reciprocal reranking).\n",
    "    \"\"\"\n",
    "\n",
    "    K2 = setting['K2']\n",
    "    print(f'Start QE, memory usage: {psutil.virtual_memory().percent}')\n",
    "    if not os.path.exists(setting['QE_DIST_DIR']):\n",
    "        os.makedirs(setting['QE_DIST_DIR'])\n",
    "\n",
    "    all_num = len(initial_rank)\n",
    "    for i in tqdm(range(all_num)):\n",
    "        neighbor_rows = [\n",
    "            load_numpy(os.path.join(setting['GRAPH_DIST_DIR'], f'{j:08d}.npy'))\n",
    "            for j in initial_rank[i, :K2]\n",
    "        ]\n",
    "        mean_dist = np.mean(np.array(neighbor_rows), axis=0)\n",
    "        save_numpy(os.path.join(setting['QE_DIST_DIR'], f'{i:08d}.npy'),\n",
    "                   mean_dist)\n",
    "    print(f'Finish QE, memory usage: {psutil.virtual_memory().percent}')\n",
    "\n",
    "\n",
    "def compute_jaccard(query_num, all_num):\n",
    "    \"\"\"Compute Jaccard distance.\n",
    "\n",
    "    Streams the QE distance rows from disk; for each of the first query_num\n",
    "    samples it accumulates elementwise minima against every row sharing a\n",
    "    nonzero column, then converts that overlap into a Jaccard-style\n",
    "    distance row written to JACCARD_DIR (one .npy file per query).\n",
    "\n",
    "    Args:\n",
    "        query_num: Number of query rows to process.\n",
    "        all_num: Total number of samples (query + index).\n",
    "    \"\"\"\n",
    "\n",
    "    JACCARD_DIR = setting['JACCARD_DIR']\n",
    "    QE_DIST_DIR = setting['QE_DIST_DIR']\n",
    "    if not os.path.exists(JACCARD_DIR):\n",
    "        os.makedirs(JACCARD_DIR)\n",
    "\n",
    "    gc.collect()\n",
    "    print(f'Start Jaccard, memory usage: {psutil.virtual_memory().percent}')\n",
    "\n",
    "    # prb_nonzero_dic[k]: columns where row k is nonzero.\n",
    "    # gal_nonzero_dic[g]: rows that have a nonzero at column g (inverted\n",
    "    # index over the sparse QE rows).\n",
    "    gal_nonzero_dic = {k: [] for k in range(all_num)}\n",
    "    prb_nonzero_dic = {k: [] for k in range(all_num)}\n",
    "    for k in range(all_num):\n",
    "        sample_dist = load_numpy(os.path.join(QE_DIST_DIR, f'{k:08d}.npy'))\n",
    "        indexes = np.where(sample_dist != 0)[0]\n",
    "        for gal in indexes:\n",
    "            if gal in gal_nonzero_dic:\n",
    "                gal_nonzero_dic[gal].append(k)\n",
    "        prb_nonzero_dic[k] = list(indexes)\n",
    "\n",
    "    invIndex = []\n",
    "    for i in range(all_num):\n",
    "        invIndex.append(gal_nonzero_dic[i])\n",
    "    for i in tqdm(range(query_num)):\n",
    "        temp_min = np.zeros(shape=[1, all_num], dtype=np.float16)\n",
    "        indNonZero = prb_nonzero_dic[i]\n",
    "        indImages = []\n",
    "        indImages = [invIndex[ind] for ind in indNonZero]\n",
    "        # Accumulate sum_j min(query_j, gallery_j) over shared nonzero cols.\n",
    "        for j in range(len(indNonZero)):\n",
    "            # NOTE(review): reloads the same query row on every iteration\n",
    "            # of j -- it could be hoisted out of this loop.\n",
    "            temp_indNonZero_dist = load_numpy(os.path.join(QE_DIST_DIR,\n",
    "                                                           f'{i:08d}.npy'))\n",
    "            temp_indNonZero_dist = temp_indNonZero_dist[indNonZero[j]]\n",
    "            temp_indImages = indImages[j]\n",
    "            min_dist_list = []\n",
    "            for ind in temp_indImages:\n",
    "                temp_ind_dist = load_numpy(os.path.join(QE_DIST_DIR,\n",
    "                                                        f'{ind:08d}.npy'))\n",
    "                min_dist_list.append(temp_ind_dist[indNonZero[j]])\n",
    "            min_dist_list = np.array(min_dist_list)\n",
    "            temp_min[0, indImages[j]] = temp_min[0, indImages[j]] + \\\n",
    "                    np.minimum(temp_indNonZero_dist, min_dist_list)\n",
    "        # Rows are normalized to sum 1 in build_graph, so (2 - temp_min)\n",
    "        # plays the role of the union mass: dist = 1 - inter/union.\n",
    "        jaccard_dist = 1-temp_min/(2-temp_min)\n",
    "        jaccard_dist = jaccard_dist.flatten()\n",
    "        save_numpy(os.path.join(JACCARD_DIR, f'{i:08d}.npy'), jaccard_dist)\n",
    "    print(f'Finish Jaccard, memory usage: {psutil.virtual_memory().percent}')\n",
    "\n",
    "\n",
    "def merge_sims(query_num, all_num):\n",
    "    \"\"\"Blend euclidean and Jaccard distances into query/index similarities.\n",
    "\n",
    "    sim = 1 - (LAMBDA * euclidean + (1 - LAMBDA) * jaccard), keeping only\n",
    "    the index columns (the first query_num columns are query-vs-query).\n",
    "    \"\"\"\n",
    "\n",
    "    print(f'Start merge sim, memory usage: {psutil.virtual_memory().percent}')\n",
    "    euc_dir = setting['EUC_DIST_DIR']\n",
    "    jaccard_dir = setting['JACCARD_DIR']\n",
    "    lam = setting['LAMBDA']\n",
    "\n",
    "    merged_dist = np.zeros((query_num, all_num - query_num), dtype=np.float16)\n",
    "    for row in range(query_num):\n",
    "        euc_row = load_numpy(os.path.join(euc_dir, f'{row:08d}.npy'),\n",
    "                             save_disk_flag=False)\n",
    "        jac_row = load_numpy(os.path.join(jaccard_dir, f'{row:08d}.npy'))\n",
    "        merged_dist[row] = (jac_row*(1-lam) + euc_row*lam)[query_num:]\n",
    "    print(f'Finish merge sim, memory usage: {psutil.virtual_memory().percent}')\n",
    "    return 1.0 - merged_dist\n",
    "\n",
    "\n",
    "def get_origin_sims(query_num, all_num):\n",
    "    \"\"\"Recover query/index similarities from cached euclidean distances.\"\"\"\n",
    "\n",
    "    print(f'Start original, memory usage: {psutil.virtual_memory().percent}')\n",
    "    euc_dir = setting['EUC_DIST_DIR']\n",
    "\n",
    "    dist_rows = np.zeros((query_num, all_num - query_num), dtype=np.float16)\n",
    "    for row in range(query_num):\n",
    "        full_row = load_numpy(os.path.join(euc_dir, f'{row:08d}.npy'),\n",
    "                              save_disk_flag=False)\n",
    "        dist_rows[row] = full_row[query_num:]\n",
    "    print(f'Finish original, memory usage: {psutil.virtual_memory().percent}')\n",
    "    return 1.0 - dist_rows\n",
    "\n",
    "\n",
    "def cache_expand_sims(probe_feats, index_feats):\n",
    "    \"\"\"Cache expanded(query + index) sims for KR rerank.\n",
    "\n",
    "    Concatenates probe and index features, computes one distance row per\n",
    "    sample on GPU, stores each full row under EUC_DIST_DIR/<i>.npy, and\n",
    "    collects the top K1+10 neighbor ids per sample.\n",
    "\n",
    "    Args:\n",
    "        probe_feats: (query_num, dim) torch feature tensor.\n",
    "        index_feats: (index_num, dim) torch feature tensor (same device).\n",
    "\n",
    "    Returns:\n",
    "        initial_rank: (all_num, K1+10) int32 array of neighbor ids; also\n",
    "        saved to INITIAL_RANK_FILE.\n",
    "    \"\"\"\n",
    "\n",
    "    if not os.path.exists(setting['EUC_DIST_DIR']):\n",
    "        os.makedirs(setting['EUC_DIST_DIR'])\n",
    "\n",
    "    query_num = probe_feats.shape[0]\n",
    "    index_num = index_feats.shape[0]\n",
    "    all_num = query_num + index_num\n",
    "    initial_rank = np.zeros((all_num, setting['K1']+10), dtype=np.int32)\n",
    "    concat_feat = torch.cat([probe_feats, index_feats])\n",
    "    del probe_feats, index_feats\n",
    "    torch.cuda.empty_cache()  # empty GPU memory\n",
    "    gc.collect()\n",
    "    print(f'Load feats memory usage: {psutil.virtual_memory().percent}')\n",
    "    for sample_index in tqdm(range(all_num)):\n",
    "        cos_sim = torch.matmul(concat_feat[sample_index][None, :],\n",
    "                               concat_feat.T)\n",
    "        # euc_dist_gpu = 2 * (1 - cos_sim)\n",
    "        # euc_dist_gpu = torch.sqrt(euc_dist_gpu)\n",
    "        # custom euc dist without norm: maps cosine similarity in [-1, 1]\n",
    "        # to a distance in [0, 1].\n",
    "        euc_dist_gpu = 1.0 - (cos_sim + 1.0)/2\n",
    "        euc_dist_gpu = euc_dist_gpu[0]\n",
    "        euc_dist_cpu = euc_dist_gpu.cpu().numpy()\n",
    "        # print(euc_dist_cpu.shape, euc_dist_cpu.max(), euc_dist_cpu.min())\n",
    "        orders = torch.argsort(euc_dist_gpu)\n",
    "        orders = orders.cpu().numpy()[:setting['K1']+10]\n",
    "        initial_rank[sample_index, :] = orders\n",
    "        save_numpy(os.path.join(setting['EUC_DIST_DIR'],\n",
    "                                f'{sample_index:08d}.npy'),\n",
    "                   euc_dist_cpu, save_disk_flag=False)\n",
    "        del cos_sim, euc_dist_gpu, euc_dist_cpu, orders\n",
    "        # print(f'Memory usage: {psutil.virtual_memory().percent}')\n",
    "    # print(initial_rank.shape)\n",
    "    save_numpy(setting['INITIAL_RANK_FILE'], initial_rank)\n",
    "    del concat_feat\n",
    "    torch.cuda.empty_cache()  # empty GPU memory\n",
    "    gc.collect()\n",
    "    print(f'Finish cache sim, memory usage: {psutil.virtual_memory().percent}')\n",
    "    return initial_rank\n",
    "\n",
    "\n",
    "def kr_rerank_disk(probe_feats, index_feats):\n",
    "    \"\"\"Disk-backed k-reciprocal reranking.\n",
    "\n",
    "    Returns a (query_num, index_num) similarity matrix combining the\n",
    "    original and Jaccard distances.\n",
    "    \"\"\"\n",
    "\n",
    "    print('Starting re_ranking')\n",
    "    query_num = len(probe_feats)\n",
    "    initial_rank = cache_expand_sims(probe_feats, index_feats)\n",
    "    all_num = len(initial_rank)\n",
    "    build_graph(initial_rank)\n",
    "    # Query expansion only when K2 > 1.\n",
    "    if setting['K2'] != 1:\n",
    "        expand_query(initial_rank)\n",
    "    print(f'Memory usage: {psutil.virtual_memory().percent}')\n",
    "    del initial_rank\n",
    "    gc.collect()\n",
    "    print(f'Memory usage: {psutil.virtual_memory().percent}')\n",
    "    compute_jaccard(query_num, all_num)\n",
    "    return merge_sims(query_num, all_num)\n",
    "\n",
    "\n",
    "# ########################## Cell 7 Refset extraction  #######################\n",
    "\n",
    "\n",
    "def load_meta(backbone):\n",
    "    \"\"\"Assemble reference-set metadata for category rerank.\n",
    "\n",
    "    Loads the pickled ref meta and per-image features, moves the features\n",
    "    to GPU as fp16 in chunks (freeing host memory as it goes), and builds\n",
    "    global-id / location tensors aligned with the ref index order.\n",
    "\n",
    "    Args:\n",
    "        backbone: Backbone tag selecting the cached ref feature pickle.\n",
    "\n",
    "    Returns:\n",
    "        ref_meta dict with 'ref_feats', 'ref_gbid', 'ref_loc' GPU tensors\n",
    "        plus the original mapping entries.\n",
    "    \"\"\"\n",
    "\n",
    "    with open(setting['REF_SET_META'], 'rb') as f_meta:\n",
    "        ref_meta = pickle.load(f_meta)\n",
    "    print(f'Load ref_set_meta, memory: {psutil.virtual_memory().percent}')\n",
    "    pkl_name = setting['REF_SET_FEAT'].replace('.pkl', f'_{backbone}.pkl')\n",
    "    refset_feat_dic = pickle.load(open(pkl_name, 'rb'))\n",
    "    print(f'Load raw ref_set_feat, memory: {psutil.virtual_memory().percent}')\n",
    "    ref_feats_gpu = []\n",
    "    ref_feats = []\n",
    "    batch_size = 128\n",
    "    # Upload features in fp16 chunks of `batch_size`, deleting each host\n",
    "    # entry right after use to bound peak memory.\n",
    "    for ref_name in ref_meta['ref_name_list']:\n",
    "        ref_feats.append(refset_feat_dic[ref_name])\n",
    "        if len(ref_feats) % batch_size == 0:\n",
    "            ref_feats = np.array(ref_feats)\n",
    "            ref_feats = torch.tensor(ref_feats).cuda().half()\n",
    "            ref_feats_gpu.append(ref_feats)\n",
    "            ref_feats = []\n",
    "        del refset_feat_dic[ref_name]\n",
    "    if len(ref_feats) > 0:\n",
    "        # Flush the final partial chunk.\n",
    "        ref_feats = np.array(ref_feats)\n",
    "        ref_feats = torch.tensor(ref_feats).cuda().half()\n",
    "        ref_feats_gpu.append(ref_feats)\n",
    "    del refset_feat_dic\n",
    "    del ref_meta['ref_name_list']\n",
    "    ref_meta['ref_feats'] = torch.cat(ref_feats_gpu, dim=0)\n",
    "    print(f'Convert ref_set_feat, memory: {psutil.virtual_memory().percent}')\n",
    "\n",
    "    with open(setting['REF_LOC_MAP'], 'rb') as f:\n",
    "        loc_map = pickle.load(f)\n",
    "    # Build per-ref-index global-id and location tensors; loc_map maps a\n",
    "    # global id to its location (NOTE(review): presumably coordinates --\n",
    "    # verify against the producer of REF_LOC_MAP).\n",
    "    ref_gbid = []\n",
    "    ref_loc = []\n",
    "    refindex2globalid = ref_meta['refindex2globalid']\n",
    "    for refindex in refindex2globalid:\n",
    "        gbid = refindex2globalid[refindex]\n",
    "        ref_gbid.append(gbid)\n",
    "        loc = loc_map[gbid]\n",
    "        ref_loc.append(loc)\n",
    "    ref_gbid = torch.tensor(ref_gbid).cuda()\n",
    "    ref_loc = torch.tensor(ref_loc).cuda()\n",
    "    ref_meta['ref_gbid'] = ref_gbid\n",
    "    ref_meta['ref_loc'] = ref_loc\n",
    "    print(f'Convert other metas, memory: {psutil.virtual_memory().percent}')\n",
    "    return ref_meta\n",
    "\n",
    "\n",
    "def prepare_meta():\n",
    "    \"\"\"Build and pickle the ref-set meta mappings.\n",
    "\n",
    "    Creates refindex <-> global-id lookup tables plus the ordered ref image\n",
    "    name list, skipping non-landmark entries (global_id < 0), and stores\n",
    "    them in setting['REF_SET_META'].\n",
    "    \"\"\"\n",
    "\n",
    "    # NOTE: iteration order defines ref indexes; global_id serves as the\n",
    "    # category name.\n",
    "    print('Using %s as ref set' % setting['REF_SET_LIST'])\n",
    "    with open(setting['REF_SET_LIST'], 'rb') as f_ref:\n",
    "        ref_set_list = pickle.load(f_ref)\n",
    "    refindex2globalid, globalid2refindex = OrderedDict(), OrderedDict()\n",
    "    ref_name_list = []\n",
    "    global_count = 0\n",
    "    for item in ref_set_list:\n",
    "        image_name = os.path.basename(item[0]).split('.jpg')[0]\n",
    "        global_id = item[1]\n",
    "        if global_id < 0:\n",
    "            # Ignore non-landmarks\n",
    "            continue\n",
    "        assert global_id < 203094\n",
    "        ref_name_list.append(image_name)\n",
    "        globalid2refindex.setdefault(global_id, []).append(global_count)\n",
    "        refindex2globalid[global_count] = global_id\n",
    "        global_count += 1\n",
    "    print(f'{len(globalid2refindex)} unique global ids')\n",
    "    print(f'{global_count} ref images')\n",
    "    save_dic = {'globalid2refindex': globalid2refindex,\n",
    "                'refindex2globalid': refindex2globalid,\n",
    "                'ref_name_list': ref_name_list}\n",
    "    dirname = os.path.dirname(setting['REF_SET_META'])\n",
    "    if not os.path.exists(dirname):\n",
    "        os.makedirs(dirname)\n",
    "    with open(setting['REF_SET_META'], 'wb') as f_meta:\n",
    "        pickle.dump(save_dic, f_meta, pickle.HIGHEST_PROTOCOL)\n",
    "\n",
    "\n",
    "def extract_refset(reid, backbone):\n",
    "    \"\"\"Extract and pickle ReID features for every ref-set image.\n",
    "\n",
    "    Args:\n",
    "        reid: ReID_Inference instance used for extraction.\n",
    "        backbone: Backbone tag appended to the output pickle name.\n",
    "    \"\"\"\n",
    "\n",
    "    with open(setting['REF_ALL_LIST'], 'rb') as f_ref:\n",
    "        ref_set_list = pickle.load(f_ref)\n",
    "    ref_path_list = [item[0] for item in ref_set_list]\n",
    "\n",
    "    start_time = time.time()\n",
    "    all_feature_dic = reid.extract(ref_path_list)\n",
    "    # Key features by the image file stem instead of the full path.\n",
    "    save_dic = {os.path.basename(path).split('.jpg')[0]: feat\n",
    "                for path, feat in all_feature_dic.items()}\n",
    "    print('%.4f s' % (time.time() - start_time))\n",
    "    pkl_name = setting['REF_SET_FEAT'].replace('.pkl', f'_{backbone}.pkl')\n",
    "    with open(pkl_name, 'wb') as f_pkl:\n",
    "        pickle.dump(save_dic, f_pkl, pickle.HIGHEST_PROTOCOL)\n",
    "    print(f'Extract refset Memory: {psutil.virtual_memory().percent}')\n",
    "\n",
    "\n",
    "# ########################## Cell 8 Get output file  #########################\n",
    "\n",
    "\n",
    "def compute_sim(backbone):\n",
    "    \"\"\"Compute initial similarities for one backbone and cache them.\n",
    "\n",
    "    Writes '<backbone>_sims.pkl' under SIMS_DIR plus the shared\n",
    "    probe/index name-list pickle (NAME_LIST_FILE).\n",
    "    \"\"\"\n",
    "\n",
    "    torch.cuda.empty_cache()  # empty GPU memory\n",
    "    gc.collect()\n",
    "    print(f'Start cache sim, memory usage: {psutil.virtual_memory().percent}')\n",
    "    if not os.path.exists(setting['SIMS_DIR']):\n",
    "        os.makedirs(setting['SIMS_DIR'])\n",
    "\n",
    "    probe_name_list, probe_feats = load_feat('probe', backbone)\n",
    "    index_name_list, index_feats = load_feat('index', backbone)\n",
    "    print(len(probe_name_list), len(index_name_list))\n",
    "\n",
    "    probe_feats = torch.tensor(probe_feats).cuda().half()\n",
    "    index_feats = torch.tensor(index_feats).cuda().half()\n",
    "    # BUGFIX: the original if/elif chain ended in an unreachable `else`\n",
    "    # that only printed a warning and left `sims` undefined (NameError\n",
    "    # further down).  The three reachable cases are kept with identical\n",
    "    # semantics and the dead branch is removed.\n",
    "    if setting['CATEGORY_RERANK'] == 'before_merge':\n",
    "        # Per-model tag/location rerank applied before similarity fusion.\n",
    "        ref_info = load_meta(backbone)\n",
    "        ref_info['probe_name_list'] = probe_name_list\n",
    "        ref_info['index_name_list'] = index_name_list\n",
    "\n",
    "        sims = torch.matmul(probe_feats, index_feats.T)\n",
    "        index_tags, index_locs = get_index_tags(index_feats, ref_info, batch_size=128)\n",
    "        probe_tags, probe_tag_scores, probe_locs, probe_loc_scores = get_probe_tags_topk(probe_feats, ref_info)\n",
    "        sims = category_rerank_after_merge(sims, probe_tags, probe_locs,\n",
    "                                           probe_tag_scores, probe_loc_scores,\n",
    "                                           index_tags, index_locs,\n",
    "                                           sim_thr=setting['CATEGORY_THR'],\n",
    "                                           alpha=setting['alpha'],\n",
    "                                           beta=setting['beta'])\n",
    "    elif setting['KR_FLAG']:\n",
    "        # NOTE: KR rerank seems not suitable here.\n",
    "        sims = kr_rerank_disk(probe_feats, index_feats)\n",
    "    else:\n",
    "        # Plain dot-product similarity (features presumably L2-normalized\n",
    "        # upstream -- TODO confirm).\n",
    "        sims = torch.matmul(probe_feats, index_feats.T).cpu().numpy()\n",
    "    if torch.is_tensor(sims):\n",
    "        sims = sims.cpu().numpy()\n",
    "    pkl_name = os.path.join(setting['SIMS_DIR'], f'{backbone}_sims.pkl')\n",
    "    with open(pkl_name, 'wb') as f_sims:\n",
    "        pickle.dump(sims, f_sims, pickle.HIGHEST_PROTOCOL)\n",
    "    # NOTE: It is important to fix this order for all models.\n",
    "    with open(setting['NAME_LIST_FILE'], 'wb') as f_name:\n",
    "        pickle.dump([probe_name_list, index_name_list], f_name)\n",
    "\n",
    "\n",
    "def get_output():\n",
    "    \"\"\"Fuse per-model similarity matrices and write the final ranklist CSV.\"\"\"\n",
    "    print(f'Get output start, memory: {psutil.virtual_memory().percent}')\n",
    "    with open(setting['NAME_LIST_FILE'], 'rb') as f_name:\n",
    "        [probe_name_list, index_name_list] = pickle.load(f_name)\n",
    "    # Weighted sum of every backbone's cached similarity matrix.\n",
    "    sims = None\n",
    "    for backbone, weight in zip(setting['MODEL_LIST'], setting['MODEL_WEIGHT']):\n",
    "        pkl_name = os.path.join(setting['SIMS_DIR'], f'{backbone}_sims.pkl')\n",
    "        with open(pkl_name, 'rb') as f_sims:\n",
    "            backbone_sims = pickle.load(f_sims)\n",
    "            print(f\"backbone: {backbone}, weight: {weight}\")\n",
    "            if sims is None:\n",
    "                sims = weight * backbone_sims\n",
    "            else:\n",
    "                sims += weight * backbone_sims\n",
    "    print(f'Sim Fusion Done, memory: {psutil.virtual_memory().percent}')\n",
    "\n",
    "    if setting['CATEGORY_RERANK'] == 'after_merge':\n",
    "        sims = torch.tensor(sims).cuda().half()\n",
    "        # FIX: dropped the unused `idx` from enumerate(); only the\n",
    "        # backbone name is needed inside this loop.\n",
    "        for backbone in setting['MODEL_LIST']:\n",
    "            print('Computing category rerank after merge')\n",
    "            probe_name_list, probe_feats = load_feat('probe', backbone)\n",
    "            index_name_list, index_feats = load_feat('index', backbone)\n",
    "            print(len(probe_name_list), len(index_name_list))\n",
    "            ref_info = load_meta(backbone)\n",
    "            ref_info['probe_name_list'] = probe_name_list\n",
    "            ref_info['index_name_list'] = index_name_list\n",
    "            probe_feats = torch.tensor(probe_feats).cuda().half()\n",
    "            index_feats = torch.tensor(index_feats).cuda().half()\n",
    "            index_tags, index_locs = get_index_tags(index_feats, ref_info, batch_size=128)\n",
    "            probe_tags, probe_tag_scores, probe_locs, probe_loc_scores = get_probe_tags_topk(probe_feats, ref_info)\n",
    "            sims = category_rerank_after_merge(sims, probe_tags, probe_locs,\n",
    "                                               probe_tag_scores, probe_loc_scores,\n",
    "                                               index_tags, index_locs,\n",
    "                                               sim_thr=setting['CATEGORY_THR'],\n",
    "                                               alpha=setting['alpha'],\n",
    "                                               beta=setting['beta'])\n",
    "        print(f'Tag Rerank for each model done!, memory: {psutil.virtual_memory().percent}')\n",
    "        sims = sims.cpu().numpy()\n",
    "\n",
    "    print(f'Get sims Memory: {psutil.virtual_memory().percent}')\n",
    "    write_csv(probe_name_list, index_name_list, sims)\n",
    "\n",
    "\n",
    "def main():\n",
    "    \"\"\"Main.\n",
    "\n",
    "    Pipeline: load image list -> (optional) debug inference -> (optional)\n",
    "    meta preparation -> per-backbone feature extraction + similarity\n",
    "    caching -> fused output CSV.\n",
    "    \"\"\"\n",
    "\n",
    "    print(f'Init Memory usage: {psutil.virtual_memory().percent}')\n",
    "    image_list, query_count = load_image_list()\n",
    "    print(f'load image Memory usage: {psutil.virtual_memory().percent}')\n",
    "    # Both None -> dummy-submission path: copy the sample CSV and stop.\n",
    "    if image_list is None and query_count is None:\n",
    "        print('Dummy submission!')\n",
    "        shutil.copyfile(os.path.join(setting['IMAGE_DIR'],\n",
    "                                     'sample_submission.csv'),\n",
    "                        'submission.csv')\n",
    "        return\n",
    "    if setting['DEBUG_FLAG']:\n",
    "        debug_reid_inference(image_list)\n",
    "    if setting['CATEGORY_RERANK'] != 'off':\n",
    "        # meta info shared by all models\n",
    "        prepare_meta()\n",
    "    # One extraction + similarity pass per configured backbone.\n",
    "    for backbone in setting['MODEL_LIST']:\n",
    "        if setting['REID_EXTRACT_FLAG']:\n",
    "            # Extract probe/index features with this backbone and save them.\n",
    "            reid = ReID_Inference(backbone)\n",
    "            print(f'Load model, memory: {psutil.virtual_memory().percent}')\n",
    "            start_time = time.time()\n",
    "            feature_dic = reid.extract(image_list)\n",
    "            print('%.4f s for %s' % ((time.time() - start_time), backbone))\n",
    "            print(f'Extract feature Memory: {psutil.virtual_memory().percent}')\n",
    "            save_feature(feature_dic, backbone)\n",
    "            print(f'Save feature Memory: {psutil.virtual_memory().percent}')\n",
    "        if setting['REF_SET_EXTRACT']:\n",
    "            # These should be offline calculated.\n",
    "            print('Extract refset feature')\n",
    "            reid = ReID_Inference(backbone)\n",
    "            print(f'Load model, memory: {psutil.virtual_memory().percent}')\n",
    "            extract_refset(reid, backbone)\n",
    "            print(f'Extract refset Memory: {psutil.virtual_memory().percent}')\n",
    "        compute_sim(backbone)\n",
    "    print(f'Compute sim Memory: {psutil.virtual_memory().percent}')\n",
    "    get_output()\n",
    "\n",
    "\n",
    "# Script entry point: run the whole offline retrieval pipeline.\n",
    "if __name__ == '__main__':\n",
    "    main()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.10"
  },
  "papermill": {
   "default_parameters": {},
   "duration": 12.607227,
   "end_time": "2021-10-01T18:45:36.952938",
   "environment_variables": {},
   "exception": null,
   "input_path": "__notebook__.ipynb",
   "output_path": "__notebook__.ipynb",
   "parameters": {},
   "start_time": "2021-10-01T18:45:24.345711",
   "version": "2.3.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
