{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### imports\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import torch, math\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.utils import data\n",
    "from torch import optim\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "from transformers import get_linear_schedule_with_warmup,get_cosine_with_hard_restarts_schedule_with_warmup\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "\n",
    "from gensim import corpora, similarities, models\n",
    "from gensim.matutils import corpus2csc\n",
    "from multiprocessing import shared_memory\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "from collections import Counter\n",
    "import base64\n",
    "from tqdm.auto import tqdm\n",
    "import pickle, random\n",
    "import matplotlib.pyplot as plt\n",
    "from multiprocessing import Pool\n",
    "import sys, csv, json, os, gc, time"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### parameters\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda:0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<torch._C.Generator at 0x7f52501c7d50>"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "no = '0'\n",
    "device = torch.device('cuda:'+no) if torch.cuda.is_available() else torch.device('cpu')\n",
    "print(device)\n",
    "\n",
    "k = 10\n",
    "lr = 1e-5\n",
    "# true batch size = batch_size * grad_step\n",
    "batch_size = 64\n",
    "margin = 8\n",
    "grad_step = 1\n",
    "max_img_len = 30\n",
    "epochs = 1 # only read data once\n",
    "MOD = 20000\n",
    "shuffle_fold = True\n",
    "workers = 2\n",
    "seed = 7414\n",
    "random.seed(seed)\n",
    "torch.manual_seed(seed)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class params:\n",
    "    LABEL_SIZE = 32\n",
    "    IMG_FEAT_SIZE = 2048+6+LABEL_SIZE\n",
    "    WORD_EMBED_SIZE = 1024\n",
    "    LAYER = 12\n",
    "    HIDDEN_SIZE = 1024\n",
    "    MULTI_HEAD = 16\n",
    "    DROPOUT_R = 0.1\n",
    "    FLAT_MLP_SIZE = 512\n",
    "    FLAT_GLIMPSES = 1\n",
    "    FLAT_OUT_SIZE = 2048\n",
    "    FF_SIZE = HIDDEN_SIZE*4\n",
    "    HIDDEN_SIZE_HEAD = HIDDEN_SIZE // MULTI_HEAD\n",
    "    OPT_BETAS = (0.9, 0.98)\n",
    "    OPT_EPS = 1e-9\n",
    "    TRAIN_SIZE = 3000000\n",
    "\n",
    "__C = params()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### load data\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "trash = {'!', '$', \"'ll\", \"'s\", ',', '&', ':', 'and', 'cut', 'is', 'are', 'was'}\n",
    "trash_replace = ['\"hey siri, play some', 'however, ', 'yin and yang, ',\n",
    "                 'shopping mall/']\n",
    "\n",
    "def process(x):\n",
    "    tmp = x.split()\n",
    "    if tmp[0] in trash: x = ' '.join(tmp[1:])\n",
    "    if tmp[0][0] == '-': x = x[1:]\n",
    "    for tr in trash_replace:\n",
    "        x = x.replace(tr, '')\n",
    "    return x\n",
    "\n",
     "def normalize(x):\n",
     "    \"\"\"Normalize a row's detection boxes and append area and aspect-ratio columns.\n",
     "\n",
     "    Returns an (n_boxes, 6) array: 4 normalized coordinates + area + ratio.\n",
     "    \"\"\"\n",
     "    ret = x['boxes'].copy()  # copy: frombuffer-backed arrays are read-only\n",
     "    # NOTE(review): columns 0/2 are divided by image_h and 1/3 by image_w; if the\n",
     "    # box layout is (x1, y1, x2, y2) this swaps the axes -- confirm the layout.\n",
     "    ret[:,0] /= x['image_h']\n",
     "    ret[:,1] /= x['image_w']\n",
     "    ret[:,2] /= x['image_h']\n",
     "    ret[:,3] /= x['image_w']\n",
     "    wh = (ret[:,2]-ret[:,0]) * (ret[:,3]-ret[:,1])\n",
     "    wh2 = (ret[:,2]-ret[:,0]) / (ret[:,3]-ret[:,1]+1e-6)  # +eps avoids division by zero\n",
     "    ret = np.hstack((ret, wh.reshape(-1,1), wh2.reshape(-1,1)))\n",
     "    return ret\n",
    "\n",
     "def load_data(file_name, reset=False, decode=True):\n",
     "    \"\"\"Read one TSV split into a DataFrame.\n",
     "\n",
     "    decode=True: base64-decode boxes/features/class_labels, normalize the boxes,\n",
     "    and build per-region rows [label, 2048-d feature, 6 box stats], truncated to\n",
     "    max_img_len regions. reset=True: re-number query_id densely by grouping on\n",
     "    the cleaned query text.\n",
     "    \"\"\"\n",
     "    ret = pd.read_csv(file_name, sep='\\t')\n",
     "    if decode:\n",
     "        ret['boxes'] = ret['boxes'].apply(lambda x: np.frombuffer(base64.b64decode(x), dtype=np.float32).reshape(-1, 4))\n",
     "        ret['features'] = ret['features'].apply(lambda x: np.frombuffer(base64.b64decode(x), dtype=np.float32).reshape(-1, 2048))\n",
     "        ret['class_labels'] = ret['class_labels'].apply(lambda x: np.frombuffer(base64.b64decode(x), dtype=np.int64).reshape(-1, 1))\n",
     "        ret['boxes'] = ret.apply(lambda x: normalize(x), axis=1)\n",
     "        # column 0 of 'features' is the class label id -- Net.forward relies on this layout\n",
     "        ret['features'] = ret.apply(lambda x: np.concatenate((x['class_labels'], x['features'], x['boxes']), axis=1)[:max_img_len], axis=1)\n",
     "    ret['query'] = ret['query'].apply(lambda x: process(x))\n",
     "    # reset query_id\n",
     "    if reset:\n",
     "        query2qid = {query: qid for qid, (query, _) in enumerate(tqdm(ret.groupby('query')))}\n",
     "        ret['query_id'] = ret.apply(lambda x: query2qid[x['query']], axis=1)\n",
     "    return ret"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
    "path = './'\n",
    "test = load_data(path+'valid.tsv')\n",
    "testA = load_data(path+'testB.tsv')\n",
    "answers = json.loads(open(path+'valid_answer.json', 'r').read())"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### preprocess\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "08590928acc9420a9faa3e47244fc2b5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=496.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# load pre-trained model\n",
    "take = 'roberta-large'\n",
    "emb_size = __C.WORD_EMBED_SIZE\n",
    "tokenizer = AutoTokenizer.from_pretrained(take)\n",
    "pretrained_emb = AutoModel.from_pretrained(take)\n",
    "pad_id = tokenizer.pad_token_id\n",
    "\n",
    "qid2token = {qid: tokenizer.encode(group['query'].values[0]) for qid, group in tqdm(test.groupby('query_id'))}\n",
    "test['token'] = test['query_id'].apply(lambda x: qid2token[x])\n",
    "# qid2token = {qid: tokenizer.encode(group['query'].values[0]) for qid, group in tqdm(testA.groupby('query_id'))}\n",
    "# testA['token'] = testA['query_id'].apply(lambda x: qid2token[x])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### training data\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "def one(x):\n",
    "    return (x+1e-10)/(x+1e-10)\n",
    "\n",
    "def get_negs(train):\n",
    "    qid2idxs = {}\n",
    "    corpus = []\n",
    "    idx2qid = {}\n",
    "\n",
    "    for idx, (qid, group) in enumerate(train.groupby('query_id')):\n",
    "        qid2idxs[qid] = group.index\n",
    "        corpus.append(group['query'].values[0])\n",
    "        idx2qid[idx] = qid\n",
    "\n",
    "    topk = len(max(qid2idxs.values(), key=lambda x: len(x)))*3\n",
    "    corpus = [sent.split() for sent in corpus]\n",
    "    dictionary = corpora.Dictionary(corpus)\n",
    "    corpus = [dictionary.doc2bow(text) for text in corpus]\n",
    "    tfidf_model = models.TfidfModel(corpus, wlocal=one, dictionary=dictionary)\n",
    "    corpus_tfidf = corpus2csc(tfidf_model[corpus])\n",
    "    sm = corpus_tfidf.T.dot(corpus_tfidf)\n",
    "    qid2negs = {}\n",
    "    \n",
    "    for idx, (le, ri) in enumerate(tqdm(zip(sm.indptr[:-1], sm.indptr[1:]), total=sm.shape[0])):\n",
    "        n_row_pick = min(topk, ri-le)\n",
    "        top_n_idx = sm.indices[le+np.argpartition(sm.data[le:ri], -n_row_pick)[-n_row_pick:]].tolist()\n",
    "        if n_row_pick < topk: top_n_idx += random.sample(range(sm.shape[0]), topk-n_row_pick)\n",
    "        qid2negs[idx2qid[idx]] = [idx2qid[neg] for neg in top_n_idx if neg != idx]\n",
    "        \n",
    "    return qid2negs, qid2idxs"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "def get_data(train, qid2negs, qid2idxs):\n",
     "    \"\"\"Build [token_ids, positive_row_index, negative_row_index] training triples.\n",
     "\n",
     "    For each query, pairs every positive row with a random hard-negative row,\n",
     "    repeated k times. Requires len(qid2negs[qid]) >= len(pos) for random.sample.\n",
     "    \"\"\"\n",
     "    train_x = [] # [tokens, feature1, feature2]\n",
     "    \n",
     "    for qid, group in tqdm(train.groupby('query_id')):\n",
     "        # positive\n",
     "        pos = group.index\n",
     "        token = group['token'].iloc[0]\n",
     "        # negative: `i` here is only a repeat counter; the comprehension below has\n",
     "        # its own scope, so its `i` does not clobber this one\n",
     "        for i in range(k):\n",
     "            neg = random.sample(qid2negs[qid], len(pos))\n",
     "            neg = [random.choice(qid2idxs[n]) for n in neg]\n",
     "            train_x += [[token, pos[i], neg[i]] for i in range(len(pos))]\n",
     "    \n",
     "    print('number of training data:', len(train_x))\n",
     "    return train_x"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### model\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FC(nn.Module):\n",
    "    def __init__(self, in_size, out_size, dropout_r=0., use_relu=True):\n",
    "        super(FC, self).__init__()\n",
    "        self.dropout_r = dropout_r\n",
    "        self.use_relu = use_relu\n",
    "\n",
    "        self.linear = nn.Linear(in_size, out_size)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.dropout = nn.Dropout(dropout_r)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.linear(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.dropout(x)\n",
    "        return x\n",
    "\n",
    "\n",
    "class MLP(nn.Module):\n",
    "    def __init__(self, in_size, mid_size, out_size, dropout_r=0., use_relu=True):\n",
    "        super(MLP, self).__init__()\n",
    "\n",
    "        self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)\n",
    "        self.linear = nn.Linear(mid_size, out_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.linear(self.fc(x))\n",
    "\n",
    "\n",
     "class LayerNorm(nn.Module):\n",
     "    \"\"\"Layer normalization with learnable gain (a_2) and bias (b_2).\n",
     "\n",
     "    Note: eps is added to the std (not the variance) and .std() is the unbiased\n",
     "    estimator -- both differ slightly from nn.LayerNorm.\n",
     "    \"\"\"\n",
     "    def __init__(self, size, eps=1e-6):\n",
     "        super(LayerNorm, self).__init__()\n",
     "        self.eps = eps\n",
     "\n",
     "        self.a_2 = nn.Parameter(torch.ones(size))\n",
     "        self.b_2 = nn.Parameter(torch.zeros(size))\n",
     "\n",
     "    def forward(self, x):\n",
     "        # normalize over the last dimension only\n",
     "        mean = x.mean(-1, keepdim=True)\n",
     "        std = x.std(-1, keepdim=True)\n",
     "        return self.a_2 * (x-mean) / (std+self.eps) + self.b_2\n",
    "    \n",
    "class MHAtt(nn.Module):\n",
    "    def __init__(self, __C):\n",
    "        super(MHAtt, self).__init__()\n",
    "        self.__C = __C\n",
    "\n",
    "        self.linear_v = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
    "        self.linear_k = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
    "        self.linear_q = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
    "        self.linear_merge = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
    "        self.dropout = nn.Dropout(__C.DROPOUT_R)\n",
    "\n",
    "    def forward(self, v, k, q, mask):\n",
    "        n_batches = q.size(0)\n",
    "        v = self.linear_v(v).view(n_batches,\n",
    "                                  -1,\n",
    "                                  self.__C.MULTI_HEAD,\n",
    "                                  self.__C.HIDDEN_SIZE_HEAD).transpose(1, 2)\n",
    "        k = self.linear_k(k).view(n_batches,\n",
    "                                  -1,\n",
    "                                  self.__C.MULTI_HEAD,\n",
    "                                  self.__C.HIDDEN_SIZE_HEAD).transpose(1, 2)\n",
    "        q = self.linear_q(q).view(n_batches,\n",
    "                                  -1,\n",
    "                                  self.__C.MULTI_HEAD,\n",
    "                                  self.__C.HIDDEN_SIZE_HEAD).transpose(1, 2)\n",
    "\n",
    "        atted = self.att(v, k, q, mask)\n",
    "        atted = atted.transpose(1, 2).contiguous().view(n_batches, -1, self.__C.HIDDEN_SIZE)\n",
    "        atted = self.linear_merge(atted)\n",
    "\n",
    "        return atted\n",
    "\n",
    "    def att(self, value, key, query, mask):\n",
    "        d_k = query.size(-1)\n",
    "        scores = torch.matmul(query, key.transpose(-2, -1))/math.sqrt(d_k)\n",
    "        scores = scores.masked_fill(mask, -1e9)\n",
    "        att_map = F.softmax(scores, dim=-1)\n",
    "        att_map = self.dropout(att_map)\n",
    "        return torch.matmul(att_map, value)\n",
    "\n",
     "# ---------------------------\n",
     "# ---- Feed Forward Nets ----\n",
     "# ---------------------------\n",
     "\n",
     "class FFN(nn.Module):\n",
     "    \"\"\"Position-wise feed-forward block: HIDDEN -> FF_SIZE (ReLU + dropout) -> HIDDEN.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(FFN, self).__init__()\n",
     "\n",
     "        self.mlp = MLP(in_size=__C.HIDDEN_SIZE,\n",
     "                       mid_size=__C.FF_SIZE,\n",
     "                       out_size=__C.HIDDEN_SIZE,\n",
     "                       dropout_r=__C.DROPOUT_R,\n",
     "                       use_relu=True)\n",
     "\n",
     "    def forward(self, x):\n",
     "        return self.mlp(x)\n",
    "\n",
     "# ------------------------\n",
     "# ---- Self Attention ----\n",
     "# ------------------------\n",
     "\n",
     "class SA(nn.Module):\n",
     "    \"\"\"Transformer encoder layer: self-attention then FFN, each sub-layer\n",
     "    wrapped in residual + dropout + LayerNorm (post-norm).\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(SA, self).__init__()\n",
     "\n",
     "        self.mhatt = MHAtt(__C)\n",
     "        self.ffn = FFN(__C)\n",
     "\n",
     "        self.dropout1 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm1 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "        self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "    def forward(self, x, x_mask):\n",
     "        x = self.norm1(x + self.dropout1(self.mhatt(x, x, x, x_mask)))\n",
     "        x = self.norm2(x + self.dropout2(self.ffn(x)))\n",
     "        return x\n",
    "\n",
     "# -------------------------------\n",
     "# ---- Self Guided Attention ----\n",
     "# -------------------------------\n",
     "\n",
     "class SGA(nn.Module):\n",
     "    \"\"\"Decoder-style layer: self-attention on x, then cross-attention where x\n",
     "    queries attend over y, then FFN; each sub-layer is residual + dropout + LayerNorm.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(SGA, self).__init__()\n",
     "\n",
     "        self.mhatt1 = MHAtt(__C)\n",
     "        self.mhatt2 = MHAtt(__C)\n",
     "        self.ffn = FFN(__C)\n",
     "\n",
     "        self.dropout1 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm1 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "        self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "        self.dropout3 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm3 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "    def forward(self, x, y, x_mask, y_mask):\n",
     "        x = self.norm1(x + self.dropout1(self.mhatt1(x, x, x, x_mask)))\n",
     "        # value=y, key=y, query=x: x attends over y\n",
     "        x = self.norm2(x + self.dropout2(self.mhatt2(y, y, x, y_mask)))\n",
     "        x = self.norm3(x + self.dropout3(self.ffn(x)))\n",
     "        return x\n",
    "    \n",
    "class GA(nn.Module):\n",
    "    def __init__(self, __C):\n",
    "        super(GA, self).__init__()\n",
    "\n",
    "        self.mhatt2 = MHAtt(__C)\n",
    "        self.ffn = FFN(__C)\n",
    "\n",
    "        self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n",
    "        self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n",
    "\n",
    "        self.dropout3 = nn.Dropout(__C.DROPOUT_R)\n",
    "        self.norm3 = LayerNorm(__C.HIDDEN_SIZE)\n",
    "\n",
    "    def forward(self, x, y, x_mask, y_mask):\n",
    "        x = self.norm2(x + self.dropout2(self.mhatt2(y, y, x, y_mask)))\n",
    "        x = self.norm3(x + self.dropout3(self.ffn(x)))\n",
    "        return x\n",
    "        \n",
     "# ------------------------------------------------\n",
     "# ---- MAC Layers Cascaded by Encoder-Decoder ----\n",
     "# ------------------------------------------------\n",
     "\n",
     "class MCA_ED(nn.Module):\n",
     "    \"\"\"Encoder-decoder cascade: LAYER SA blocks encode x (language), then LAYER\n",
     "    SGA blocks refine y (image) while attending to the fully-encoded x.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(MCA_ED, self).__init__()\n",
     "\n",
     "        self.enc_list = nn.ModuleList([SA(__C) for _ in range(__C.LAYER)])\n",
     "        self.dec_list = nn.ModuleList([SGA(__C) for _ in range(__C.LAYER)])\n",
     "\n",
     "    def forward(self, x, y, x_mask, y_mask):\n",
     "        # Get hidden vector\n",
     "        for enc in self.enc_list:\n",
     "            x = enc(x, x_mask)\n",
     "        # every decoder layer sees the final encoder output x\n",
     "        for dec in self.dec_list:\n",
     "            y = dec(y, x, y_mask, x_mask)\n",
     "        return x, y\n",
    "      \n",
    "    \n",
    "class AttFlat(nn.Module):\n",
    "    def __init__(self, __C):\n",
    "        super(AttFlat, self).__init__()\n",
    "        self.__C = __C\n",
    "\n",
    "        self.mlp = MLP(in_size=__C.HIDDEN_SIZE,\n",
    "                       mid_size=__C.FLAT_MLP_SIZE,\n",
    "                       out_size=__C.FLAT_GLIMPSES,\n",
    "                       dropout_r=__C.DROPOUT_R,\n",
    "                       use_relu=True)\n",
    "        self.linear_merge = nn.Linear(__C.HIDDEN_SIZE*__C.FLAT_GLIMPSES, __C.FLAT_OUT_SIZE)\n",
    "\n",
    "    def forward(self, x, x_mask):\n",
    "        att = self.mlp(x)\n",
    "        att = att.masked_fill(x_mask.squeeze(1).squeeze(1).unsqueeze(2), -1e9)\n",
    "        att = F.softmax(att, dim=1)\n",
    "\n",
    "        att_list = []\n",
    "        for i in range(self.__C.FLAT_GLIMPSES):\n",
    "            att_list.append(torch.sum(att[:,:,i:i+1]*x, dim=1))\n",
    "\n",
    "        x_atted = torch.cat(att_list, dim=1)\n",
    "        x_atted = self.linear_merge(x_atted)\n",
    "        return x_atted\n",
    "\n",
     "# -------------------------\n",
     "# ---- Main MCAN Model ----\n",
     "# -------------------------\n",
     "\n",
     "class Net(nn.Module):\n",
     "    \"\"\"MCAN-style query-image matching network.\n",
     "\n",
     "    forward() scores the query token ids against each image feature tensor in\n",
     "    `img_feats` (e.g. a (positive, negative) pair during training) and returns\n",
     "    one score tensor per entry.\n",
     "    \"\"\"\n",
     "    def __init__(self, __C, pretrained_emb, answer_size):\n",
     "        super(Net, self).__init__()\n",
     "\n",
     "        self.embedding = pretrained_emb.embeddings  # reuse only roberta's embedding layer\n",
     "        self.label_emb = nn.Embedding(33, __C.LABEL_SIZE)  # 33 = detector label vocab size -- TODO confirm\n",
     "        self.img_feat_linear = MLP(__C.IMG_FEAT_SIZE, __C.IMG_FEAT_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.lang_feat_linear = nn.Linear(__C.WORD_EMBED_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.backbone = MCA_ED(__C)\n",
     "\n",
     "        self.attflat_img = AttFlat(__C)\n",
     "        self.attflat_lang = AttFlat(__C)\n",
     "        \n",
     "        self.proj_norm_lang = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj_norm_img = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj_norm_mul = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj_norm_dis = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj = MLP(__C.FLAT_OUT_SIZE*4, __C.FLAT_OUT_SIZE*2, answer_size)\n",
     "\n",
     "    def forward(self, ques_ix, img_feats):\n",
     "        proj_feats = []\n",
     "        for img_feat in img_feats:\n",
     "            # Make mask\n",
     "            lang_feat_mask = self.make_mask(ques_ix.unsqueeze(2), pad_id)\n",
     "            img_feat_mask = self.make_mask(img_feat, 0)\n",
     "\n",
     "            # Pre-process Language Feature\n",
     "            # NOTE(review): recomputed for every img_feat; in train mode each branch\n",
     "            # therefore draws an independent embedding-dropout sample -- presumably\n",
     "            # intentional, do not hoist out of the loop without checking.\n",
     "            lang_feat = self.embedding(ques_ix)\n",
     "            lang_feat = self.lang_feat_linear(lang_feat)\n",
     "\n",
     "            # Pre-process Image Feature: column 0 is the class label id (see load_data)\n",
     "            label_feat = self.label_emb(img_feat[:,:,0].long())\n",
     "            img_feat = torch.cat((img_feat[:,:,1:], label_feat), dim=2)\n",
     "            img_feat = self.img_feat_linear(img_feat)\n",
     "\n",
     "            # Backbone Framework\n",
     "            lang_feat, img_feat = self.backbone(lang_feat, img_feat, lang_feat_mask, img_feat_mask)\n",
     "            lang_feat = self.attflat_lang(lang_feat, lang_feat_mask)\n",
     "            img_feat = self.attflat_img(img_feat, img_feat_mask)\n",
     "            distance = torch.abs(lang_feat-img_feat)\n",
     "            \n",
     "            # fuse [lang, img, elementwise product, |difference|], each LayerNormed\n",
     "            proj_feat = torch.cat((self.proj_norm_lang(lang_feat),\n",
     "                                   self.proj_norm_img(img_feat),\n",
     "                                   self.proj_norm_mul(lang_feat*img_feat),\n",
     "                                   self.proj_norm_dis(distance)\n",
     "                                  ), dim=1)\n",
     "            proj_feat = self.proj(proj_feat)\n",
     "            proj_feats.append(proj_feat)\n",
     "        return proj_feats\n",
     "\n",
     "    # Masking: True where a position is padding -- positions whose abs-sum over the\n",
     "    # last dim equals `target`; output shape (batch, 1, 1, seq_len) so it\n",
     "    # broadcasts over attention heads\n",
     "    def make_mask(self, feature, target):\n",
     "        return (torch.sum(torch.abs(feature), dim=-1) == target).unsqueeze(1).unsqueeze(2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### train\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(model):\n",
    "    model = model.eval()\n",
    "    preds = {}\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        for qid, group in tqdm(test.groupby('query_id')):\n",
    "            # prepare batch\n",
    "            tokens, features = group['token'].values.tolist(), group['features'].values.tolist()\n",
    "            max_len_f = len(max(features, key=lambda x: len(x)))\n",
    "            features = [np.concatenate((feature, np.zeros((max_len_f-feature.shape[0], feature.shape[1]))), axis=0) for feature in features]\n",
    "            # # to tensor\n",
    "            tokens = torch.LongTensor(tokens).to(device)\n",
    "            features = torch.FloatTensor(features).to(device)\n",
    "            # predict\n",
    "            out = model(tokens, (features,))[0].view(-1)\n",
    "            pred = [(pid, val) for pid, val in zip(group['product_id'].values.tolist(), out.tolist())]\n",
    "            pred.sort(key=lambda x: x[1], reverse=True)\n",
    "            preds[qid] = [pid for pid, _ in pred[:5]]\n",
    "            \n",
    "    model = model.train()\n",
    "    return preds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CustomDataset(data.Dataset):\n",
    "    def __init__(self, train_x):\n",
    "        self.train_x = train_x\n",
    "        \n",
    "    def __getitem__(self, index):\n",
    "        tokens, pos_features, neg_features = self.train_x[index][0], self.train_x[index][1], self.train_x[index][2]\n",
    "        return [tokens, pos_features, neg_features]\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.train_x)\n",
    "    \n",
    "def collate_fn(batch):\n",
    "    tokens, pos_features, neg_features = zip(*batch)\n",
    "    max_len_t, max_len_pf, max_len_nf = len(max(tokens, key=lambda x: len(x))), len(max(pos_features, key=lambda x: len(x))), len(max(neg_features, key=lambda x: len(x)))\n",
    "    tokens, pos_features, neg_features = [token+[pad_id]*(max_len_t-len(token)) for token in tokens], [np.concatenate((feature, np.zeros((max_len_pf-feature.shape[0], feature.shape[1]))), axis=0) for feature in pos_features], [np.concatenate((feature, np.zeros((max_len_nf-feature.shape[0], feature.shape[1]))), axis=0) for feature in neg_features]\n",
    "    return torch.LongTensor(tokens), torch.FloatTensor(pos_features), torch.FloatTensor(neg_features)\n",
    "\n",
     "def custom_schedule(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, amplitude=0.1, last_epoch=-1):\n",
     "    \"\"\"LambdaLR schedule: linear warmup, then linear decay modulated by a sine ripple.\n",
     "\n",
     "    amplitude scales the ripple relative to the decaying envelope; abs() keeps\n",
     "    the LR factor non-negative.\n",
     "    \"\"\"\n",
     "    \n",
     "    def lr_lambda(current_step):\n",
     "        if current_step < num_warmup_steps:\n",
     "            return float(current_step) / float(max(1, num_warmup_steps))\n",
     "        progress = 2.0 * math.pi * float(num_cycles) * float(current_step-num_warmup_steps) / float(max(1, num_training_steps-num_warmup_steps))\n",
     "        linear = float(num_training_steps-current_step) / float(max(1, num_training_steps-num_warmup_steps))\n",
     "        return abs(linear + math.sin(progress)*linear*amplitude)\n",
     "\n",
     "    return LambdaLR(optimizer, lr_lambda, last_epoch)\n",
    "\n",
    "def shuffle(x):\n",
    "    idxs = [i for i in range(x.shape[0])]\n",
    "    random.shuffle(idxs)\n",
    "    return x[idxs]\n",
    "\n",
    "def nDCG_score(preds, answers):\n",
    "    iDCG = sum([sum([np.log(2)/np.log(i+2) for i in range(min(len(answer), 5))]) \\\n",
    "                for answer in list(answers.values())])\n",
    "    DCG = sum([sum([np.log(2)/np.log(i+2) if preds[qid][i] in answers[str(qid)] else 0 \\\n",
    "                    for i in range(len(preds[qid]))]) for qid in list(preds.keys())])\n",
    "    return DCG/iDCG\n",
    "\n",
    "class FocalLoss(nn.Module):\n",
    "    def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):\n",
    "        super(FocalLoss, self).__init__()\n",
    "        self.alpha = alpha\n",
    "        self.gamma = gamma\n",
    "        self.logits = logits\n",
    "        self.reduce = reduce\n",
    "\n",
    "    def forward(self, inputs, targets):\n",
    "        if self.logits:\n",
    "            BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduce=False)\n",
    "        else:\n",
    "            BCE_loss = F.binary_cross_entropy(inputs, targets, reduce=False)\n",
    "        pt = torch.exp(-BCE_loss)\n",
    "        F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss\n",
    "\n",
    "        if self.reduce:\n",
    "            return torch.mean(F_loss)\n",
    "        else:\n",
    "            return F_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_data_merge(idx):\n",
    "#     train = load_data(path+'train.sample.tsv', reset=True)\n",
    "    train = load_data(path+'data/train_{}.tsv'.format(idx), reset=True, decode=False)\n",
    "    qid2token = {qid: tokenizer.encode(group['query'].values[0]) for qid, group in train.groupby('query_id')}\n",
    "    train['token'] = train['query_id'].apply(lambda x: qid2token[x])\n",
    "    qid2negs, qid2idxs = get_negs(train)\n",
    "    pickle.dump(get_data(train, qid2negs, qid2idxs),\n",
    "                open(path+'data/train_x_{}_{}.pkl'.format(idx, seed), 'wb'))\n",
    "    return None"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "metadata": {},
   "outputs": [],
   "source": [
    "t0 = time.time()\n",
    "with Pool(5) as pool:\n",
    "    pool.map(get_data_merge, [i for i in range(20)])\n",
    "t = round((time.time()-t0)/60)\n",
    "print('time consumed: {} hr {} min'.format(t//60, t%60))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "initializing model...\n",
      "fold: 0\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "0c3165fff60f43dc81fcfb1159cd0f5e",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 1\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "5c52c06ab9de4c9e8377c4a6c0438613",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 2\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b04eec4e2aa742cba4b456118ee72e05",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 3\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "9ff3f212a102424d885128d25e10024a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 4\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6dc6402da823481c992bb4d5ff94685d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 5\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f165d1f5774245b4b1a61d68942290a5",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 6\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ce5b501a9e5443a3a2df6538289b3a60",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 7\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "887162a533834ef8b5511ec65f77785f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 8\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "1b96debbb05c4ab3afb25fe22ab2ef81",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 9\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "e925108968f74046851ce231956533e6",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 10\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6f8d5140c9bb4057b6ed2813bb6a4f4f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 11\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "c7b90cd03b014f5093a6c9df1f912a51",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 12\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "63ffb5e5884a40aebae02a629f2097fe",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 13\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "06c9551aef824b3fa775e3d50bf7a61a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 14\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ed3b43a8899f40c39680620ac74681b8",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 15\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "b32dc4e9f59a49e18fccf25494e41028",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 16\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "abd46175cf39495aa93997c1c07ca09f",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 17\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "6c1e93159b0340b6a93db6fd1f2b2797",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 18\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "f9c4c50ba0cd46d394d9071a3ffe0af2",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "fold: 19\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "14cdb9ebb88d44a690eaa6997366281c",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=1500000.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "train all size: 750000\n",
      "start training!\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "ecb827d56ba14ac6b24b8356b5b7590a",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "HBox(children=(FloatProgress(value=0.0, max=11719.0), HTML(value='')))"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/u1451673/.conda/envs/myenv/lib/python3.8/site-packages/torch/nn/_reduction.py:43: UserWarning: size_average and reduce args will be deprecated, please use reduction='none' instead.\n",
      "  warnings.warn(warning.format(ret))\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loss:0.36396177113056183\n"
     ]
    }
   ],
   "source": [
    "print('initializing model...')\n",
    "nDCGs = []\n",
    "best_nDCG = 0.0\n",
    "model = Net(__C, pretrained_emb, 1).to(device)\n",
    "num_training_steps = np.ceil(__C.TRAIN_SIZE*k / (batch_size*grad_step)) * epochs\n",
    "num_warmup_steps = int(num_training_steps*0.1)\n",
    "eval_steps = num_training_steps//40*grad_step + 100\n",
    "optimizer = optim.AdamW(model.parameters(), lr=lr)\n",
    "scheduler = custom_schedule(optimizer,\n",
    "                            num_warmup_steps=num_warmup_steps,\n",
    "                            num_training_steps=num_training_steps,\n",
    "                            num_cycles=6,\n",
    "                            amplitude=0.3)\n",
    "train_x_all = []\n",
    "shm_list = []\n",
    "\n",
    "for idx in range(20):\n",
    "    print('fold:', idx)\n",
    "    with open('shape_list_{}'.format(idx), 'rb') as file:\n",
    "        shape_list = pickle.load(file) \n",
    "    shm = shared_memory.SharedMemory(name='train_{}'.format(idx))\n",
    "    shm_list.append(shm)\n",
    "    train = np.ndarray((shape_list[-1], 2055), dtype=np.float64, buffer=shm.buf)\n",
    "    train_x = pickle.load(open(path+'data/train_x_{}_{}.pkl'.format(idx, seed), 'rb'))\n",
    "    train_x_all += [[t, train[shape_list[p]:shape_list[p+1]], train[shape_list[n]:shape_list[n+1]]] \\\n",
    "                    for t, p, n in tqdm(train_x)]\n",
    "\n",
    "random.shuffle(train_x_all)\n",
    "steps = 40\n",
    "chunk_size = len(train_x_all)//steps\n",
    "\n",
    "for chunk in range(steps):\n",
    "    train_loader = data.DataLoader(CustomDataset(train_x_all[chunk_size*chunk:chunk_size*(chunk+1)]),\n",
    "                                   batch_size=batch_size,\n",
    "                                   shuffle=False,\n",
    "                                   collate_fn=collate_fn,\n",
    "                                   num_workers=workers)\n",
    "    print('train all size:', len(train_loader.dataset))\n",
    "\n",
    "    print('start training!')\n",
    "    model = model.train()\n",
    "    criterion = FocalLoss()\n",
    "    total_loss = 0.0\n",
    "    step = len(train_loader)\n",
    "    optimizer.zero_grad()\n",
    "    pbar = tqdm(enumerate(train_loader), total=step)\n",
    "\n",
    "    for i, batch in pbar:\n",
    "        # prepare batch\n",
    "        tokens, pos_features, neg_features = batch\n",
    "        # # to device\n",
    "        tokens = tokens.to(device)\n",
    "        pos_features = pos_features.to(device)\n",
    "        neg_features = neg_features.to(device)\n",
    "        # predict\n",
    "        pos, neg = model(tokens, (pos_features, neg_features))\n",
    "        pos = torch.sigmoid(pos).view(-1)\n",
    "        neg = torch.sigmoid(neg).view(-1)\n",
    "        l = criterion(pos, torch.ones(pos.size()).to(device))\n",
    "        l.backward()\n",
    "        total_loss += l.item()\n",
    "        l = criterion(neg, torch.zeros(neg.size()).to(device))\n",
    "        l.backward()\n",
    "        total_loss += l.item()\n",
    "        pbar.set_postfix({'loss': total_loss/(i+1)})\n",
    "        # optim step\n",
    "        if (i+1)%grad_step == 0:\n",
    "            optimizer.step()\n",
    "            scheduler.step()\n",
    "            optimizer.zero_grad()\n",
    "        # evaluate\n",
    "        if (i+1)%eval_steps == 0:\n",
    "            preds = predict(model)\n",
    "            nDCG = nDCG_score(preds, answers)\n",
    "            nDCGs.append(nDCG)\n",
    "            pickle.dump(nDCGs, open('log_nDCG_MCAN-RoBERTa_pair-cat_box_tfidf-neg_focal_all_shared', 'wb'))\n",
    "            print('nDCG@5:', nDCG)\n",
    "            # save models\n",
    "            if len(nDCGs) > 30:\n",
    "                print('saving model...')\n",
    "                torch.save(model.state_dict(), path+'models/model_MCAN-RoBERTa_pair-cat_box_tfidf-neg_focal_all_shared_{}_{}'.format(seed, len(nDCGs)-1))\n",
    "#                 torch.save(optimizer.state_dict(), path+'models/optimizer_{}_{}'.format(seed, len(nDCGs)-1))\n",
    "#                 torch.save(scheduler.state_dict(), path+'models/scheduler_{}_{}'.format(seed, len(nDCGs)-1))\n",
    "        # print loss\n",
    "        if i%MOD == 0: print('Loss:{}'.format(total_loss/(i+1)))\n",
    "\n",
    "    if step%grad_step:\n",
    "        optimizer.step()\n",
    "        scheduler.step()\n",
    "\n",
    "    # evaluation\n",
    "    preds = predict(model)\n",
    "    nDCG = nDCG_score(preds, answers)\n",
    "    nDCGs.append(nDCG)\n",
    "    pickle.dump(nDCGs, open('log_nDCG_MCAN-RoBERTa_pair-cat_box_tfidf-neg_focal_all_shared', 'wb'))\n",
    "    print('nDCG@5:', nDCG)\n",
    "    # delete garbage\n",
    "    del train_loader\n",
    "    gc.collect()\n",
    "    # save models\n",
    "    if len(nDCGs) > 30:\n",
    "        print('saving model...')\n",
    "        torch.save(model.state_dict(), path+'models/model_MCAN-RoBERTa_pair-cat_box_tfidf-neg_focal_all_shared_{}_{}'.format(seed, len(nDCGs)-1))\n",
    "#         torch.save(optimizer.state_dict(), path+'models/optimizer_{}_{}'.format(seed, len(nDCGs)-1))\n",
    "#         torch.save(scheduler.state_dict(), path+'models/scheduler_{}_{}'.format(seed, len(nDCGs)-1))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prediction\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def predict(model, test, pad_len):\n",
    "    model.eval()\n",
    "    counts = Counter(test['product_id'].values.tolist())\n",
    "    preds = {}\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        for qid, group in tqdm(test.groupby('query_id')):\n",
    "            # prepare batch\n",
    "            tokens, features = group['token'].values.tolist(), group['features'].values.tolist()\n",
    "            max_len_f = len(max(features, key=lambda x: len(x)))\n",
    "            features = [np.concatenate((feature, np.zeros((max_len_f-feature.shape[0], feature.shape[1]))), axis=0) for feature in features]\n",
    "            # # to tensor\n",
    "            tokens = torch.LongTensor(tokens).to(device)\n",
    "            features = torch.FloatTensor(features).to(device)\n",
    "            # predict\n",
    "            out = model(tokens, (features,))[0].view(-1)\n",
    "            pred = [(pid, val) for pid, val in zip(group['product_id'].values.tolist(), out.tolist())]\n",
    "            pred.sort(key=lambda x: x[1], reverse=True)\n",
    "            assert len(pred) <= pad_len\n",
    "            pid, score = [p for p, s in pred], [s for p, s in pred]\n",
    "            pid, score = pid+[np.nan]*(pad_len-len(pred)), score+[np.nan]*(pad_len-len(pred))\n",
    "            preds[qid] = pid+score\n",
    "            \n",
    "    return preds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "folds = [i for i in range(30, 40)]\n",
    "pad_len = 30\n",
    "\n",
    "for fold in folds:\n",
    "    print('seed: {}; fold: {}'.format(seed, fold))\n",
    "    # load model weights\n",
    "    model.load_state_dict(torch.load(path+'models/model_MCAN-RoBERTa_pair-cat_box_tfidf-neg_focal_all_shared_{}_{}'.format(seed, fold), map_location=device))\n",
    "    # predict\n",
    "    preds = predict(model, testA, pad_len)\n",
    "    # write to file\n",
    "    header = ['qid'] + ['p'+str(i) for i in range(pad_len)] + ['s'+str(i) for i in range(pad_len)]\n",
    "    with open('predictions/prediction_all_{}_{}.csv'.format(seed, fold), 'w', newline='') as f:\n",
    "        w = csv.writer(f)\n",
    "        w.writerow(header)\n",
    "        for qid in sorted(list(preds.keys())):\n",
    "            w.writerow([qid]+preds[qid])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python (myenv)",
   "language": "python",
   "name": "myenv"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.8.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
