{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### imports\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:516: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:517: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:518: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:519: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:520: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorflow/python/framework/dtypes.py:525: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:541: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint8 = np.dtype([(\"qint8\", np.int8, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:542: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint8 = np.dtype([(\"quint8\", np.uint8, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:543: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint16 = np.dtype([(\"qint16\", np.int16, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:544: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_quint16 = np.dtype([(\"quint16\", np.uint16, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:545: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  _np_qint32 = np.dtype([(\"qint32\", np.int32, 1)])\n",
      "/home/u1451673/.local/lib/python3.6/site-packages/tensorboard/compat/tensorflow_stub/dtypes.py:550: FutureWarning: Passing (type, 1) or '1type' as a synonym of type is deprecated; in a future version of numpy, it will be understood as (type, (1,)) / '(1,)type'.\n",
      "  np_resource = np.dtype([(\"resource\", np.ubyte, 1)])\n"
     ]
    }
   ],
   "source": [
    "import torch, math\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "from torch.utils import data\n",
    "from torch import optim\n",
    "from torch.optim.lr_scheduler import LambdaLR\n",
    "\n",
    "from transformers import get_linear_schedule_with_warmup,get_cosine_with_hard_restarts_schedule_with_warmup\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "\n",
    "from gensim import corpora, similarities, models\n",
    "from gensim.matutils import corpus2csc\n",
    "\n",
    "import pandas as pd \n",
    "import numpy as np\n",
    "from collections import Counter\n",
    "import base64\n",
    "from tqdm import tqdm\n",
    "import pickle, random\n",
    "import matplotlib.pyplot as plt\n",
    "from multiprocessing import Pool\n",
    "import sys, csv, json, os, gc, time"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### parameters\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda:0\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<torch._C.Generator at 0x7fbb75cc3810>"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# ---- run configuration: device, hyper-parameters, RNG seeds ----\n",
     "no = '0'  # CUDA device index (string, appended to 'cuda:')\n",
     "device = torch.device('cuda:'+no) if torch.cuda.is_available() else torch.device('cpu')\n",
     "print(device)\n",
     "\n",
     "k = 10  # presumably the number of folds -- confirm against the training loop\n",
     "lr = 1e-5  # learning rate\n",
     "# true batch size = batch_size * grad_step\n",
     "batch_size = 64\n",
     "margin = 8  # presumably a ranking-loss margin -- confirm where the loss is built\n",
     "grad_step = 1  # gradient-accumulation steps\n",
     "max_img_len = 30  # max region features kept per image (see load_data)\n",
     "epochs = 1 # only read data once\n",
     "MOD = 20000  # NOTE(review): meaning not visible in this notebook -- confirm\n",
     "shuffle_fold = True\n",
     "workers = 48  # DataLoader worker processes\n",
     "seed = 9115\n",
     "random.seed(seed)\n",
     "torch.manual_seed(seed)  # note: CUDA RNG is not seeded here"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
     "class params:\n",
     "    \"\"\"Hyper-parameter container for the MCAN-style model (instantiated as __C).\"\"\"\n",
     "    LABEL_SIZE = 32  # embedding size for region class labels\n",
     "    IMG_FEAT_SIZE = 2048+6+LABEL_SIZE  # 2048 visual + 6 box feats + label embedding\n",
     "    WORD_EMBED_SIZE = 1024  # matches roberta-large hidden size\n",
     "    LAYER = 12  # number of encoder/decoder layers in MCA_ED\n",
     "    HIDDEN_SIZE = 1024\n",
     "    MULTI_HEAD = 16  # attention heads\n",
     "    DROPOUT_R = 0.1\n",
     "    FLAT_MLP_SIZE = 512  # hidden size of the AttFlat scoring MLP\n",
     "    FLAT_GLIMPSES = 1  # attention glimpses in AttFlat\n",
     "    FLAT_OUT_SIZE = 2048  # fused feature size out of AttFlat\n",
     "    FF_SIZE = HIDDEN_SIZE*4  # feed-forward inner size\n",
     "    HIDDEN_SIZE_HEAD = HIDDEN_SIZE // MULTI_HEAD  # per-head size\n",
     "    OPT_BETAS = (0.9, 0.98)  # optimizer betas (optimizer itself not visible here)\n",
     "    OPT_EPS = 1e-9  # optimizer epsilon\n",
     "    TRAIN_SIZE = 3000000\n",
     "\n",
     "__C = params()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### load data\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "trash = {'!', '$', \"'ll\", \"'s\", ',', '&', ':', 'and', 'cut', 'is', 'are', 'was'}\n",
    "trash_replace = ['\"hey siri, play some', 'however, ', 'yin and yang, ',\n",
    "                 'shopping mall/']\n",
    "\n",
    "def process(x):\n",
    "    tmp = x.split()\n",
    "    if tmp[0] in trash: x = ' '.join(tmp[1:])\n",
    "    if tmp[0][0] == '-': x = x[1:]\n",
    "    for tr in trash_replace:\n",
    "        x = x.replace(tr, '')\n",
    "    return x\n",
    "\n",
     "def normalize(x):\n",
     "    \"\"\"Scale boxes to [0,1] by image size and append area + aspect-ratio columns.\n",
     "\n",
     "    x: row with 'boxes' ((N,4) float array), 'image_h', 'image_w'.\n",
     "    Returns an (N,6) array: 4 normalized coords + area + aspect ratio.\n",
     "    \"\"\"\n",
     "    ret = x['boxes'].copy()\n",
     "    # NOTE(review): cols 0/2 are divided by image_h and cols 1/3 by image_w,\n",
     "    # i.e. the layout is assumed (y1, x1, y2, x2) -- confirm upstream format.\n",
     "    ret[:,0] /= x['image_h']\n",
     "    ret[:,1] /= x['image_w']\n",
     "    ret[:,2] /= x['image_h']\n",
     "    ret[:,3] /= x['image_w']\n",
     "    wh = (ret[:,2]-ret[:,0]) * (ret[:,3]-ret[:,1])  # normalized box area\n",
     "    wh2 = (ret[:,2]-ret[:,0]) / (ret[:,3]-ret[:,1]+1e-6)  # aspect ratio (eps avoids /0)\n",
     "    ret = np.hstack((ret, wh.reshape(-1,1), wh2.reshape(-1,1)))\n",
     "    return ret\n",
    "\n",
     "def load_data(file_name, reset=False, decode=True):\n",
     "    \"\"\"Load a TSV split and (optionally) decode its base64-encoded image features.\n",
     "\n",
     "    file_name: tab-separated file with at least 'query' and 'query_id';\n",
     "        when decode=True also 'boxes', 'features', 'class_labels' columns\n",
     "        holding base64-encoded arrays.\n",
     "    reset: when True, re-number query_id densely by grouping on 'query'.\n",
     "    decode: when False, skip feature decoding entirely.\n",
     "    Returns the processed DataFrame.\n",
     "    \"\"\"\n",
     "    ret = pd.read_csv(file_name, sep='\\t')\n",
     "    if decode:\n",
     "        # base64 -> float32 (N,4) boxes, float32 (N,2048) features, int64 (N,1) labels\n",
     "        ret['boxes'] = ret['boxes'].apply(lambda x: np.frombuffer(base64.b64decode(x), dtype=np.float32).reshape(-1, 4))\n",
     "        ret['features'] = ret['features'].apply(lambda x: np.frombuffer(base64.b64decode(x), dtype=np.float32).reshape(-1, 2048))\n",
     "        ret['class_labels'] = ret['class_labels'].apply(lambda x: np.frombuffer(base64.b64decode(x), dtype=np.int64).reshape(-1, 1))\n",
     "        ret['boxes'] = ret.apply(lambda x: normalize(x), axis=1)\n",
     "        # final per-region feature: [label | 2048 visual | 6 box] = 2055 cols,\n",
     "        # truncated to the first max_img_len regions\n",
     "        ret['features'] = ret.apply(lambda x: np.concatenate((x['class_labels'], x['features'], x['boxes']), axis=1)[:max_img_len], axis=1)\n",
     "    ret['query'] = ret['query'].apply(lambda x: process(x))\n",
     "    # reset query_id\n",
     "    if reset:\n",
     "        query2qid = {query: qid for qid, (query, _) in enumerate(tqdm(ret.groupby('query')))}\n",
     "        ret['query_id'] = ret.apply(lambda x: query2qid[x['query']], axis=1)\n",
     "    return ret"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [],
   "source": [
     "path = './'\n",
     "test = load_data(path+'valid.tsv')  # validation split (scored against valid_answer.json)\n",
     "# NOTE(review): the name 'testA' is stale -- it actually loads testB.tsv\n",
     "testA = load_data(path+'testB.tsv')\n",
     "answers = json.loads(open(path+'valid_answer.json', 'r').read())  # str(qid) -> relevant product_ids"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### preprocess\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 496/496 [00:00<00:00, 3138.40it/s]\n",
      "100%|██████████| 994/994 [00:00<00:00, 3378.95it/s]\n"
     ]
    }
   ],
   "source": [
     "# load pre-trained model\n",
     "take = 'roberta-large'\n",
     "emb_size = __C.WORD_EMBED_SIZE\n",
     "tokenizer = AutoTokenizer.from_pretrained(take)\n",
     "pretrained_emb = AutoModel.from_pretrained(take)  # only .embeddings is used by Net\n",
     "pad_id = tokenizer.pad_token_id  # used for token padding and the language mask\n",
     "\n",
     "# tokenize each distinct query once, then broadcast to all rows of that query\n",
     "qid2token = {qid: tokenizer.encode(group['query'].values[0]) for qid, group in tqdm(test.groupby('query_id'))}\n",
     "test['token'] = test['query_id'].apply(lambda x: qid2token[x])\n",
     "qid2token = {qid: tokenizer.encode(group['query'].values[0]) for qid, group in tqdm(testA.groupby('query_id'))}\n",
     "testA['token'] = testA['query_id'].apply(lambda x: qid2token[x])"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### model\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "class FC(nn.Module):\n",
    "    def __init__(self, in_size, out_size, dropout_r=0., use_relu=True):\n",
    "        super(FC, self).__init__()\n",
    "        self.dropout_r = dropout_r\n",
    "        self.use_relu = use_relu\n",
    "\n",
    "        self.linear = nn.Linear(in_size, out_size)\n",
    "        self.relu = nn.ReLU(inplace=True)\n",
    "        self.dropout = nn.Dropout(dropout_r)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.linear(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.dropout(x)\n",
    "        return x\n",
    "\n",
    "\n",
    "class MLP(nn.Module):\n",
    "    def __init__(self, in_size, mid_size, out_size, dropout_r=0., use_relu=True):\n",
    "        super(MLP, self).__init__()\n",
    "\n",
    "        self.fc = FC(in_size, mid_size, dropout_r=dropout_r, use_relu=use_relu)\n",
    "        self.linear = nn.Linear(mid_size, out_size)\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.linear(self.fc(x))\n",
    "\n",
    "\n",
    "class LayerNorm(nn.Module):\n",
    "    def __init__(self, size, eps=1e-6):\n",
    "        super(LayerNorm, self).__init__()\n",
    "        self.eps = eps\n",
    "\n",
    "        self.a_2 = nn.Parameter(torch.ones(size))\n",
    "        self.b_2 = nn.Parameter(torch.zeros(size))\n",
    "\n",
    "    def forward(self, x):\n",
    "        mean = x.mean(-1, keepdim=True)\n",
    "        std = x.std(-1, keepdim=True)\n",
    "        return self.a_2 * (x-mean) / (std+self.eps) + self.b_2\n",
    "    \n",
     "class MHAtt(nn.Module):\n",
     "    \"\"\"Multi-head scaled dot-product attention over (batch, len, HIDDEN_SIZE).\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(MHAtt, self).__init__()\n",
     "        self.__C = __C\n",
     "\n",
     "        self.linear_v = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.linear_k = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.linear_q = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.linear_merge = nn.Linear(__C.HIDDEN_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.dropout = nn.Dropout(__C.DROPOUT_R)\n",
     "\n",
     "    def forward(self, v, k, q, mask):\n",
     "        \"\"\"Attend q over k/v; mask is True at positions to ignore.\"\"\"\n",
     "        n_batches = q.size(0)\n",
     "        # project and split into MULTI_HEAD heads of HIDDEN_SIZE_HEAD:\n",
     "        # (batch, len, hidden) -> (batch, heads, len, head_dim)\n",
     "        v = self.linear_v(v).view(n_batches,\n",
     "                                  -1,\n",
     "                                  self.__C.MULTI_HEAD,\n",
     "                                  self.__C.HIDDEN_SIZE_HEAD).transpose(1, 2)\n",
     "        k = self.linear_k(k).view(n_batches,\n",
     "                                  -1,\n",
     "                                  self.__C.MULTI_HEAD,\n",
     "                                  self.__C.HIDDEN_SIZE_HEAD).transpose(1, 2)\n",
     "        q = self.linear_q(q).view(n_batches,\n",
     "                                  -1,\n",
     "                                  self.__C.MULTI_HEAD,\n",
     "                                  self.__C.HIDDEN_SIZE_HEAD).transpose(1, 2)\n",
     "\n",
     "        atted = self.att(v, k, q, mask)\n",
     "        # merge heads back: (batch, heads, len, head_dim) -> (batch, len, hidden)\n",
     "        atted = atted.transpose(1, 2).contiguous().view(n_batches, -1, self.__C.HIDDEN_SIZE)\n",
     "        atted = self.linear_merge(atted)\n",
     "\n",
     "        return atted\n",
     "\n",
     "    def att(self, value, key, query, mask):\n",
     "        \"\"\"Scaled dot-product attention; masked scores set to -1e9 pre-softmax.\"\"\"\n",
     "        d_k = query.size(-1)\n",
     "        scores = torch.matmul(query, key.transpose(-2, -1))/math.sqrt(d_k)\n",
     "        scores = scores.masked_fill(mask, -1e9)\n",
     "        att_map = F.softmax(scores, dim=-1)\n",
     "        att_map = self.dropout(att_map)\n",
     "        return torch.matmul(att_map, value)\n",
    "\n",
    "# ---------------------------\n",
    "# ---- Feed Forward Nets ----\n",
    "# ---------------------------\n",
    "\n",
     "class FFN(nn.Module):\n",
     "    \"\"\"Position-wise feed-forward net: HIDDEN_SIZE -> FF_SIZE -> HIDDEN_SIZE.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(FFN, self).__init__()\n",
     "\n",
     "        self.mlp = MLP(in_size=__C.HIDDEN_SIZE,\n",
     "                       mid_size=__C.FF_SIZE,\n",
     "                       out_size=__C.HIDDEN_SIZE,\n",
     "                       dropout_r=__C.DROPOUT_R,\n",
     "                       use_relu=True)\n",
     "\n",
     "    def forward(self, x):\n",
     "        return self.mlp(x)\n",
    "\n",
    "# ------------------------\n",
    "# ---- Self Attention ----\n",
    "# ------------------------\n",
    "\n",
     "class SA(nn.Module):\n",
     "    \"\"\"Encoder block: self-attention then FFN, each wrapped in\n",
     "    dropout + residual connection + post-LayerNorm.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(SA, self).__init__()\n",
     "\n",
     "        self.mhatt = MHAtt(__C)\n",
     "        self.ffn = FFN(__C)\n",
     "\n",
     "        self.dropout1 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm1 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "        self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "    def forward(self, x, x_mask):\n",
     "        # residual + dropout + post-norm around self-attention, then FFN\n",
     "        x = self.norm1(x + self.dropout1(self.mhatt(x, x, x, x_mask)))\n",
     "        x = self.norm2(x + self.dropout2(self.ffn(x)))\n",
     "        return x\n",
    "\n",
    "# -------------------------------\n",
    "# ---- Self Guided Attention ----\n",
    "# -------------------------------\n",
    "\n",
     "class SGA(nn.Module):\n",
     "    \"\"\"Decoder block: self-attention on x, then cross-attention where x\n",
     "    queries y, then FFN -- each with dropout + residual + post-norm.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(SGA, self).__init__()\n",
     "\n",
     "        self.mhatt1 = MHAtt(__C)\n",
     "        self.mhatt2 = MHAtt(__C)\n",
     "        self.ffn = FFN(__C)\n",
     "\n",
     "        self.dropout1 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm1 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "        self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "        self.dropout3 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm3 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "    def forward(self, x, y, x_mask, y_mask):\n",
     "        x = self.norm1(x + self.dropout1(self.mhatt1(x, x, x, x_mask)))\n",
     "        # mhatt2(v=y, k=y, q=x): x attends over y\n",
     "        x = self.norm2(x + self.dropout2(self.mhatt2(y, y, x, y_mask)))\n",
     "        x = self.norm3(x + self.dropout3(self.ffn(x)))\n",
     "        return x\n",
    "    \n",
     "class GA(nn.Module):\n",
     "    \"\"\"Guided-attention block: SGA without the self-attention step.\n",
     "    x_mask is accepted for signature parity with SGA but unused.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(GA, self).__init__()\n",
     "\n",
     "        self.mhatt2 = MHAtt(__C)\n",
     "        self.ffn = FFN(__C)\n",
     "\n",
     "        self.dropout2 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm2 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "        self.dropout3 = nn.Dropout(__C.DROPOUT_R)\n",
     "        self.norm3 = LayerNorm(__C.HIDDEN_SIZE)\n",
     "\n",
     "    def forward(self, x, y, x_mask, y_mask):\n",
     "        # mhatt2(v=y, k=y, q=x): x attends over y\n",
     "        x = self.norm2(x + self.dropout2(self.mhatt2(y, y, x, y_mask)))\n",
     "        x = self.norm3(x + self.dropout3(self.ffn(x)))\n",
     "        return x\n",
    "        \n",
    "# ------------------------------------------------\n",
    "# ---- MAC Layers Cascaded by Encoder-Decoder ----\n",
    "# ------------------------------------------------\n",
    "\n",
     "class MCA_ED(nn.Module):\n",
     "    \"\"\"Encoder-decoder cascade: LAYER SA blocks encode x, then LAYER SGA\n",
     "    blocks decode y against the final encoded x.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(MCA_ED, self).__init__()\n",
     "\n",
     "        self.enc_list = nn.ModuleList([SA(__C) for _ in range(__C.LAYER)])\n",
     "        self.dec_list = nn.ModuleList([SGA(__C) for _ in range(__C.LAYER)])\n",
     "\n",
     "    def forward(self, x, y, x_mask, y_mask):\n",
     "        # Get hidden vector\n",
     "        for enc in self.enc_list:\n",
     "            x = enc(x, x_mask)\n",
     "        # every decoder layer attends to the FINAL encoder output\n",
     "        for dec in self.dec_list:\n",
     "            y = dec(y, x, y_mask, x_mask)\n",
     "        return x, y\n",
    "      \n",
    "    \n",
     "class AttFlat(nn.Module):\n",
     "    \"\"\"Attention-pool a (batch, len, HIDDEN_SIZE) sequence into a single\n",
     "    (batch, FLAT_OUT_SIZE) vector using FLAT_GLIMPSES attention maps.\"\"\"\n",
     "    def __init__(self, __C):\n",
     "        super(AttFlat, self).__init__()\n",
     "        self.__C = __C\n",
     "\n",
     "        self.mlp = MLP(in_size=__C.HIDDEN_SIZE,\n",
     "                       mid_size=__C.FLAT_MLP_SIZE,\n",
     "                       out_size=__C.FLAT_GLIMPSES,\n",
     "                       dropout_r=__C.DROPOUT_R,\n",
     "                       use_relu=True)\n",
     "        self.linear_merge = nn.Linear(__C.HIDDEN_SIZE*__C.FLAT_GLIMPSES, __C.FLAT_OUT_SIZE)\n",
     "\n",
     "    def forward(self, x, x_mask):\n",
     "        att = self.mlp(x)  # (batch, len, FLAT_GLIMPSES) attention logits\n",
     "        # reshape the (batch, 1, 1, len) mask to (batch, len, 1) to mask pads\n",
     "        att = att.masked_fill(x_mask.squeeze(1).squeeze(1).unsqueeze(2), -1e9)\n",
     "        att = F.softmax(att, dim=1)\n",
     "\n",
     "        att_list = []\n",
     "        for i in range(self.__C.FLAT_GLIMPSES):\n",
     "            # weighted sum over the sequence dim for glimpse i\n",
     "            att_list.append(torch.sum(att[:,:,i:i+1]*x, dim=1))\n",
     "\n",
     "        x_atted = torch.cat(att_list, dim=1)\n",
     "        x_atted = self.linear_merge(x_atted)\n",
     "        return x_atted\n",
    "\n",
    "# -------------------------\n",
    "# ---- Main MCAN Model ----\n",
    "# -------------------------\n",
    "\n",
     "class Net(nn.Module):\n",
     "    \"\"\"MCAN-style matching network scoring (query, image) pairs.\n",
     "\n",
     "    Only the embedding layer of the pretrained language model is used\n",
     "    (pretrained_emb.embeddings); MCA_ED does the contextualizing.\n",
     "    answer_size is the final projection's output dimension (1 = a single\n",
     "    relevance score).\n",
     "    \"\"\"\n",
     "    def __init__(self, __C, pretrained_emb, answer_size):\n",
     "        super(Net, self).__init__()\n",
     "\n",
     "        self.embedding = pretrained_emb.embeddings\n",
     "        # 33 label-embedding rows; presumably 32 classes + 1 pad index -- confirm\n",
     "        self.label_emb = nn.Embedding(33, __C.LABEL_SIZE)\n",
     "        self.img_feat_linear = MLP(__C.IMG_FEAT_SIZE, __C.IMG_FEAT_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.lang_feat_linear = nn.Linear(__C.WORD_EMBED_SIZE, __C.HIDDEN_SIZE)\n",
     "        self.backbone = MCA_ED(__C)\n",
     "\n",
     "        self.attflat_img = AttFlat(__C)\n",
     "        self.attflat_lang = AttFlat(__C)\n",
     "        \n",
     "        self.proj_norm_lang = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj_norm_img = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj_norm_mul = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj_norm_dis = LayerNorm(__C.FLAT_OUT_SIZE)\n",
     "        self.proj = MLP(__C.FLAT_OUT_SIZE*4, __C.FLAT_OUT_SIZE*2, answer_size)\n",
     "\n",
     "    def forward(self, ques_ix, img_feats):\n",
     "        \"\"\"Score ques_ix against each image-feature tensor in img_feats.\n",
     "\n",
     "        ques_ix: (batch, seq) token ids. img_feats: iterable of\n",
     "        (batch, regions, feat) tensors whose column 0 holds the region\n",
     "        class label. Returns one (batch, answer_size) tensor per element.\n",
     "        \"\"\"\n",
     "        proj_feats = []\n",
     "        for img_feat in img_feats:\n",
     "            # Make mask\n",
     "            # language: sum(|id|) over the singleton dim equals the id itself,\n",
     "            # so positions whose id == pad_id are masked\n",
     "            lang_feat_mask = self.make_mask(ques_ix.unsqueeze(2), pad_id)\n",
     "            # image: all-zero padded region rows sum to 0 and are masked\n",
     "            img_feat_mask = self.make_mask(img_feat, 0)\n",
     "\n",
     "            # Pre-process Language Feature\n",
     "            # NOTE(review): loop-invariant, but hoisting it would change\n",
     "            # dropout RNG consumption in training mode -- left inside the loop\n",
     "            lang_feat = self.embedding(ques_ix)\n",
     "            lang_feat = self.lang_feat_linear(lang_feat)\n",
     "\n",
     "            # Pre-process Image Feature\n",
     "            # replace the integer label column with its learned embedding\n",
     "            label_feat = self.label_emb(img_feat[:,:,0].long())\n",
     "            img_feat = torch.cat((img_feat[:,:,1:], label_feat), dim=2)\n",
     "            img_feat = self.img_feat_linear(img_feat)\n",
     "\n",
     "            # Backbone Framework\n",
     "            lang_feat, img_feat = self.backbone(lang_feat, img_feat, lang_feat_mask, img_feat_mask)\n",
     "            lang_feat = self.attflat_lang(lang_feat, lang_feat_mask)\n",
     "            img_feat = self.attflat_img(img_feat, img_feat_mask)\n",
     "            distance = torch.abs(lang_feat-img_feat)\n",
     "            \n",
     "            # fuse [lang, img, lang*img, |lang-img|], each LayerNorm-ed\n",
     "            proj_feat = torch.cat((self.proj_norm_lang(lang_feat),\n",
     "                                   self.proj_norm_img(img_feat),\n",
     "                                   self.proj_norm_mul(lang_feat*img_feat),\n",
     "                                   self.proj_norm_dis(distance)\n",
     "                                  ), dim=1)\n",
     "            proj_feat = self.proj(proj_feat)\n",
     "            proj_feats.append(proj_feat)\n",
     "        return proj_feats\n",
     "\n",
     "    # Masking\n",
     "    def make_mask(self, feature, target):\n",
     "        # True (masked) where the L1 norm over the last dim equals target;\n",
     "        # shape (batch, 1, 1, len) for broadcasting over attention scores\n",
     "        return (torch.sum(torch.abs(feature), dim=-1) == target).unsqueeze(1).unsqueeze(2)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### train\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
     "def predict(model):\n",
     "    \"\"\"Rank candidate products for each query in the global `test` frame.\n",
     "\n",
     "    One query's candidates form a batch; image features are zero-padded\n",
     "    to the group's longest. Returns {query_id: [top-5 product_ids]}.\n",
     "    NOTE(review): this definition is shadowed later by\n",
     "    predict(model, test, pad_len) in the prediction section.\n",
     "    \"\"\"\n",
     "    model = model.eval()\n",
     "    preds = {}\n",
     "    \n",
     "    with torch.no_grad():\n",
     "        for qid, group in tqdm(test.groupby('query_id')):\n",
     "            # prepare batch\n",
     "            tokens, features = group['token'].values.tolist(), group['features'].values.tolist()\n",
     "            max_len_f = len(max(features, key=lambda x: len(x)))\n",
     "            features = [np.concatenate((feature, np.zeros((max_len_f-feature.shape[0], feature.shape[1]))), axis=0) for feature in features]\n",
     "            # # to tensor\n",
     "            tokens = torch.LongTensor(tokens).to(device)\n",
     "            features = torch.FloatTensor(features).to(device)\n",
     "            # predict\n",
     "            out = model(tokens, (features,))[0].view(-1)\n",
     "            pred = [(pid, val) for pid, val in zip(group['product_id'].values.tolist(), out.tolist())]\n",
     "            pred.sort(key=lambda x: x[1], reverse=True)\n",
     "            preds[qid] = [pid for pid, _ in pred[:5]]\n",
     "            \n",
     "    model = model.train()  # restore training mode before returning\n",
     "    return preds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "class CustomDataset(data.Dataset):\n",
    "    def __init__(self, train_x):\n",
    "        self.train_x = train_x\n",
    "        \n",
    "    def __getitem__(self, index):\n",
    "        tokens, pos_features, neg_features = self.train_x[index][0], self.train_x[index][1], self.train_x[index][2]\n",
    "        return [tokens, pos_features, neg_features]\n",
    "    \n",
    "    def __len__(self):\n",
    "        return len(self.train_x)\n",
    "    \n",
    "def collate_fn(batch):\n",
    "    tokens, pos_features, neg_features = zip(*batch)\n",
    "    max_len_t, max_len_pf, max_len_nf = len(max(tokens, key=lambda x: len(x))), len(max(pos_features, key=lambda x: len(x))), len(max(neg_features, key=lambda x: len(x)))\n",
    "    tokens, pos_features, neg_features = [token+[pad_id]*(max_len_t-len(token)) for token in tokens], [np.concatenate((feature, np.zeros((max_len_pf-feature.shape[0], feature.shape[1]))), axis=0) for feature in pos_features], [np.concatenate((feature, np.zeros((max_len_nf-feature.shape[0], feature.shape[1]))), axis=0) for feature in neg_features]\n",
    "    return torch.LongTensor(tokens), torch.FloatTensor(pos_features), torch.FloatTensor(neg_features)\n",
    "\n",
     "def custom_schedule(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, amplitude=0.1, last_epoch=-1):\n",
     "    \"\"\"LambdaLR: linear warmup, then linear decay modulated by a sine wave.\n",
     "\n",
     "    num_cycles: sine cycles over the decay phase.\n",
     "    amplitude: relative strength of the sine modulation.\n",
     "    \"\"\"\n",
     "    \n",
     "    def lr_lambda(current_step):\n",
     "        # warmup: ramp the multiplier linearly from 0 to 1\n",
     "        if current_step < num_warmup_steps:\n",
     "            return float(current_step) / float(max(1, num_warmup_steps))\n",
     "        # sine phase over the post-warmup portion of training\n",
     "        progress = 2.0 * math.pi * float(num_cycles) * float(current_step-num_warmup_steps) / float(max(1, num_training_steps-num_warmup_steps))\n",
     "        # linear decay from 1 to 0 over the same portion\n",
     "        linear = float(num_training_steps-current_step) / float(max(1, num_training_steps-num_warmup_steps))\n",
     "        # abs() keeps the multiplier non-negative\n",
     "        return abs(linear + math.sin(progress)*linear*amplitude)\n",
     "\n",
     "    return LambdaLR(optimizer, lr_lambda, last_epoch)\n",
    "\n",
    "def shuffle(x):\n",
    "    idxs = [i for i in range(x.shape[0])]\n",
    "    random.shuffle(idxs)\n",
    "    return x[idxs]\n",
    "\n",
    "def nDCG_score(preds, answers):\n",
    "    iDCG = sum([sum([np.log(2)/np.log(i+2) for i in range(min(len(answer), 5))]) \\\n",
    "                for answer in list(answers.values())])\n",
    "    DCG = sum([sum([np.log(2)/np.log(i+2) if preds[qid][i] in answers[str(qid)] else 0 \\\n",
    "                    for i in range(len(preds[qid]))]) for qid in list(preds.keys())])\n",
    "    return DCG/iDCG\n",
    "\n",
    "class FocalLoss(nn.Module):\n",
    "    def __init__(self, alpha=1, gamma=2, logits=False, reduce=True):\n",
    "        super(FocalLoss, self).__init__()\n",
    "        self.alpha = alpha\n",
    "        self.gamma = gamma\n",
    "        self.logits = logits\n",
    "        self.reduce = reduce\n",
    "\n",
    "    def forward(self, inputs, targets):\n",
    "        if self.logits:\n",
    "            BCE_loss = F.binary_cross_entropy_with_logits(inputs, targets, reduce=False)\n",
    "        else:\n",
    "            BCE_loss = F.binary_cross_entropy(inputs, targets, reduce=False)\n",
    "        pt = torch.exp(-BCE_loss)\n",
    "        F_loss = self.alpha * (1-pt)**self.gamma * BCE_loss\n",
    "\n",
    "        if self.reduce:\n",
    "            return torch.mean(F_loss)\n",
    "        else:\n",
    "            return F_loss"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "initializing model...\n"
     ]
    }
   ],
   "source": [
     "print('initializing model...')\n",
     "nDCGs = []  # per-evaluation nDCG history\n",
     "best_nDCG = 0.0  # best validation nDCG seen so far\n",
     "model = Net(__C, pretrained_emb, 1).to(device)  # answer_size=1: single relevance score"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### prediction\n",
    "***"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [],
   "source": [
     "def predict(model, test, pad_len):\n",
     "    \"\"\"Rank every candidate product per query; pad rankings to pad_len.\n",
     "\n",
     "    Replaces the earlier predict(model) definition (different signature).\n",
     "    model: scoring network; left in eval mode on return.\n",
     "    test: DataFrame with 'query_id', 'product_id', 'token', 'features'.\n",
     "    pad_len: maximum candidates per query (asserted per group).\n",
     "    Returns {query_id: [pid_1..pid_pad_len, score_1..score_pad_len]},\n",
     "    NaN-padded when a query has fewer than pad_len candidates.\n",
     "    \"\"\"\n",
     "    model.eval()\n",
     "    counts = Counter(test['product_id'].values.tolist())  # NOTE(review): unused\n",
     "    preds = {}\n",
     "    \n",
     "    with torch.no_grad():\n",
     "        for qid, group in tqdm(test.groupby('query_id')):\n",
     "            # prepare batch\n",
     "            tokens, features = group['token'].values.tolist(), group['features'].values.tolist()\n",
     "            # zero-pad image features to the longest in this group\n",
     "            max_len_f = len(max(features, key=lambda x: len(x)))\n",
     "            features = [np.concatenate((feature, np.zeros((max_len_f-feature.shape[0], feature.shape[1]))), axis=0) for feature in features]\n",
     "            # # to tensor\n",
     "            tokens = torch.LongTensor(tokens).to(device)\n",
     "            features = torch.FloatTensor(features).to(device)\n",
     "            # predict\n",
     "            out = model(tokens, (features,))[0].view(-1)\n",
     "            pred = [(pid, val) for pid, val in zip(group['product_id'].values.tolist(), out.tolist())]\n",
     "            pred.sort(key=lambda x: x[1], reverse=True)\n",
     "            assert len(pred) <= pad_len\n",
     "            pid, score = [p for p, s in pred], [s for p, s in pred]\n",
     "            pid, score = pid+[np.nan]*(pad_len-len(pred)), score+[np.nan]*(pad_len-len(pred))\n",
     "            preds[qid] = pid+score\n",
     "            \n",
     "    return preds"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 2330; fold: 30\n",
      "seed: 2330; fold: 31\n",
      "seed: 2330; fold: 32\n",
      "seed: 2330; fold: 33\n",
      "seed: 2330; fold: 34\n",
      "seed: 2330; fold: 35\n",
      "seed: 2330; fold: 36\n",
      "seed: 2330; fold: 37\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:18<00:00, 12.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 2330; fold: 38\n",
      "seed: 2330; fold: 39\n",
      "seed: 612; fold: 30\n",
      "seed: 612; fold: 31\n",
      "seed: 612; fold: 32\n",
      "seed: 612; fold: 33\n",
      "seed: 612; fold: 34\n",
      "seed: 612; fold: 35\n",
      "seed: 612; fold: 36\n",
      "seed: 612; fold: 37\n",
      "seed: 612; fold: 38\n",
      "seed: 612; fold: 39\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:19<00:00, 12.55it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 24; fold: 30\n",
      "seed: 24; fold: 31\n",
      "seed: 24; fold: 32\n",
      "seed: 24; fold: 33\n",
      "seed: 24; fold: 34\n",
      "seed: 24; fold: 35\n",
      "seed: 24; fold: 36\n",
      "seed: 24; fold: 37\n",
      "seed: 24; fold: 38\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.78it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 24; fold: 39\n",
      "seed: 25; fold: 30\n",
      "seed: 25; fold: 31\n",
      "seed: 25; fold: 32\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:20<00:00, 12.41it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 25; fold: 33\n",
      "seed: 25; fold: 34\n",
      "seed: 25; fold: 35\n",
      "seed: 25; fold: 36\n",
      "seed: 25; fold: 37\n",
      "seed: 25; fold: 38\n",
      "seed: 25; fold: 39\n",
      "seed: 2077; fold: 30\n",
      "seed: 2077; fold: 31\n",
      "seed: 2077; fold: 32\n",
      "seed: 2077; fold: 33\n",
      "seed: 2077; fold: 34\n",
      "seed: 2077; fold: 35\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:18<00:00, 12.72it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 2077; fold: 36\n",
      "seed: 2077; fold: 37\n",
      "seed: 2077; fold: 38\n",
      "seed: 2077; fold: 39\n",
      "seed: 2049; fold: 30\n",
      "seed: 2049; fold: 31\n",
      "seed: 2049; fold: 32\n",
      "seed: 2049; fold: 33\n",
      "seed: 2049; fold: 34\n",
      "seed: 2049; fold: 35\n",
      "seed: 2049; fold: 36\n",
      "seed: 2049; fold: 37\n",
      "seed: 2049; fold: 38\n",
      "seed: 2049; fold: 39\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.87it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 2045; fold: 30\n",
      "seed: 2045; fold: 31\n",
      "seed: 2045; fold: 32\n",
      "seed: 2045; fold: 33\n",
      "seed: 2045; fold: 34\n",
      "seed: 2045; fold: 35\n",
      "seed: 2045; fold: 36\n",
      "seed: 2045; fold: 37\n",
      "seed: 2045; fold: 38\n",
      "seed: 2045; fold: 39\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:20<00:00, 12.34it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 1917; fold: 30\n",
      "seed: 1917; fold: 31\n",
      "seed: 1917; fold: 32\n",
      "seed: 1917; fold: 33\n",
      "seed: 1917; fold: 34\n",
      "seed: 1917; fold: 35\n",
      "seed: 1917; fold: 36\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:16<00:00, 13.07it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 1917; fold: 37\n",
      "seed: 1917; fold: 38\n",
      "seed: 1917; fold: 39\n",
      "seed: 78667; fold: 30\n",
      "seed: 78667; fold: 31\n",
      "seed: 78667; fold: 32\n",
      "seed: 78667; fold: 33\n",
      "seed: 78667; fold: 34\n",
      "seed: 78667; fold: 35\n",
      "seed: 78667; fold: 36\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.75it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 78667; fold: 37\n",
      "seed: 78667; fold: 38\n",
      "seed: 78667; fold: 39\n",
      "seed: 68654; fold: 30\n",
      "seed: 68654; fold: 31\n",
      "seed: 68654; fold: 32\n",
      "seed: 68654; fold: 33\n",
      "seed: 68654; fold: 34\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.84it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 68654; fold: 35\n",
      "seed: 68654; fold: 36\n",
      "seed: 68654; fold: 37\n",
      "seed: 68654; fold: 38\n",
      "seed: 68654; fold: 39\n",
      "seed: 56474; fold: 30\n",
      "seed: 56474; fold: 31\n",
      "seed: 56474; fold: 32\n",
      "seed: 56474; fold: 33\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:18<00:00, 12.62it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 56474; fold: 34\n",
      "seed: 56474; fold: 35\n",
      "seed: 56474; fold: 36\n",
      "seed: 56474; fold: 37\n",
      "seed: 56474; fold: 38\n",
      "seed: 56474; fold: 39\n",
      "seed: 56464; fold: 30\n",
      "seed: 56464; fold: 31\n",
      "seed: 56464; fold: 32\n",
      "seed: 56464; fold: 33\n",
      "seed: 56464; fold: 34\n",
      "seed: 56464; fold: 35\n",
      "seed: 56464; fold: 36\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:16<00:00, 12.99it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 56464; fold: 37\n",
      "seed: 56464; fold: 38\n",
      "seed: 56464; fold: 39\n",
      "seed: 54367; fold: 30\n",
      "seed: 54367; fold: 31\n",
      "seed: 54367; fold: 32\n",
      "seed: 54367; fold: 33\n",
      "seed: 54367; fold: 34\n",
      "seed: 54367; fold: 35\n",
      "seed: 54367; fold: 36\n",
      "seed: 54367; fold: 37\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:16<00:00, 12.97it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 54367; fold: 38\n",
      "seed: 54367; fold: 39\n",
      "seed: 4547; fold: 30\n",
      "seed: 4547; fold: 31\n",
      "seed: 4547; fold: 32\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:16<00:00, 12.93it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 4547; fold: 33\n",
      "seed: 4547; fold: 34\n",
      "seed: 4547; fold: 35\n",
      "seed: 4547; fold: 36\n",
      "seed: 4547; fold: 37\n",
      "seed: 4547; fold: 38\n",
      "seed: 4547; fold: 39\n",
      "seed: 437; fold: 30\n",
      "seed: 437; fold: 31\n",
      "seed: 437; fold: 32\n",
      "seed: 437; fold: 33\n",
      "seed: 437; fold: 34\n",
      "seed: 437; fold: 35\n",
      "seed: 437; fold: 36\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:16<00:00, 12.93it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 437; fold: 37\n",
      "seed: 437; fold: 38\n",
      "seed: 437; fold: 39\n",
      "seed: 485; fold: 30\n",
      "seed: 485; fold: 31\n",
      "seed: 485; fold: 32\n",
      "seed: 485; fold: 33\n",
      "seed: 485; fold: 34\n",
      "seed: 485; fold: 35\n",
      "seed: 485; fold: 36\n",
      "seed: 485; fold: 37\n",
      "seed: 485; fold: 38\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.81it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 485; fold: 39\n",
      "seed: 132; fold: 30\n",
      "seed: 132; fold: 31\n",
      "seed: 132; fold: 32\n",
      "seed: 132; fold: 33\n",
      "seed: 132; fold: 34\n",
      "seed: 132; fold: 35\n",
      "seed: 132; fold: 36\n",
      "seed: 132; fold: 37\n",
      "seed: 132; fold: 38\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.83it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 132; fold: 39\n",
      "seed: 257; fold: 30\n",
      "seed: 257; fold: 31\n",
      "seed: 257; fold: 32\n",
      "seed: 257; fold: 33\n",
      "seed: 257; fold: 34\n",
      "seed: 257; fold: 35\n",
      "seed: 257; fold: 36\n",
      "seed: 257; fold: 37\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.75it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 257; fold: 38\n",
      "seed: 257; fold: 39\n",
      "seed: 584; fold: 30\n",
      "seed: 584; fold: 31\n",
      "seed: 584; fold: 32\n",
      "seed: 584; fold: 33\n",
      "seed: 584; fold: 34\n",
      "seed: 584; fold: 35\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:16<00:00, 13.04it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 584; fold: 36\n",
      "seed: 584; fold: 37\n",
      "seed: 584; fold: 38\n",
      "seed: 584; fold: 39\n",
      "seed: 931; fold: 30\n",
      "seed: 931; fold: 31\n",
      "seed: 931; fold: 32\n",
      "seed: 931; fold: 33\n",
      "seed: 931; fold: 34\n",
      "seed: 931; fold: 35\n",
      "seed: 931; fold: 36\n",
      "seed: 931; fold: 37\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.88it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 931; fold: 38\n",
      "seed: 931; fold: 39\n",
      "seed: 792; fold: 30\n",
      "seed: 792; fold: 31\n",
      "seed: 792; fold: 32\n",
      "seed: 792; fold: 33\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.80it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 792; fold: 34\n",
      "seed: 792; fold: 35\n",
      "seed: 792; fold: 36\n",
      "seed: 792; fold: 37\n",
      "seed: 792; fold: 38\n",
      "seed: 792; fold: 39\n",
      "seed: 603; fold: 30\n",
      "seed: 603; fold: 31\n",
      "seed: 603; fold: 32\n",
      "seed: 603; fold: 33\n",
      "seed: 603; fold: 34\n",
      "seed: 603; fold: 35\n",
      "seed: 603; fold: 36\n",
      "seed: 603; fold: 37\n",
      "seed: 603; fold: 38\n",
      "seed: 603; fold: 39\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:17<00:00, 12.80it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 746; fold: 30\n",
      "seed: 746; fold: 31\n",
      "seed: 746; fold: 32\n",
      "seed: 746; fold: 33\n",
      "seed: 746; fold: 34\n",
      "seed: 746; fold: 35\n",
      "seed: 746; fold: 36\n",
      "seed: 746; fold: 37\n",
      "seed: 746; fold: 38\n",
      "seed: 746; fold: 39\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:18<00:00, 12.68it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 480; fold: 30\n",
      "seed: 480; fold: 31\n",
      "seed: 480; fold: 32\n",
      "seed: 480; fold: 33\n",
      "seed: 480; fold: 34\n",
      "seed: 480; fold: 35\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "100%|██████████| 994/994 [01:18<00:00, 12.67it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "seed: 480; fold: 36\n",
      "seed: 480; fold: 37\n",
      "seed: 480; fold: 38\n",
      "seed: 480; fold: 39\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "# Ensemble inference: for every (seed, fold) checkpoint, run prediction on\n",
    "# testA and dump per-question start/end scores to one CSV per checkpoint.\n",
    "seeds = [2330, 612, 24, 25, 2077, 2049, 2045, 1917,\n",
    "         78667, 68654, 56474, 56464, 54367, 4547, 437, 485,\n",
    "         132, 257, 584, 931, 792, 603, 746, 480]\n",
    "folds = list(range(30, 40))\n",
    "pad_len = 30  # max answer span length; also the number of p*/s* score columns\n",
    "\n",
    "for seed in seeds:\n",
    "    for fold in folds:\n",
    "        print('seed: {}; fold: {}'.format(seed, fold))\n",
    "        # load model weights for this (seed, fold); skip on failure instead of\n",
    "        # aborting the whole sweep, but say why (a bare `except: continue`\n",
    "        # would also swallow KeyboardInterrupt and hide real errors)\n",
    "        ckpt = path + 'models/model_MCAN-RoBERTa_pair-cat_box_tfidf-neg_focal_all_shared_{}_{}'.format(seed, fold)\n",
    "        try:\n",
    "            model.load_state_dict(torch.load(ckpt, map_location=device))\n",
    "        except Exception as e:\n",
    "            print('skipping seed {} fold {}: {}'.format(seed, fold, e))\n",
    "            continue\n",
    "        # predict: preds maps qid -> list of 2*pad_len scores (start then end)\n",
    "        # -- assumed from the header layout below; confirm against predict()\n",
    "        preds = predict(model, testA, pad_len)\n",
    "        # write to file, one row per qid in sorted order\n",
    "        header = ['qid'] + ['p'+str(i) for i in range(pad_len)] + ['s'+str(i) for i in range(pad_len)]\n",
    "        with open('predictions/prediction_all_{}_{}.csv'.format(seed, fold), 'w', newline='') as f:\n",
    "            w = csv.writer(f)\n",
    "            w.writerow(header)\n",
    "            for qid in sorted(preds):\n",
    "                w.writerow([qid] + preds[qid])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
