{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "c5fdd9c2",
   "metadata": {
    "_cell_guid": "b1076dfc-b9ad-4769-8c92-a6c4dae69d19",
    "_uuid": "8f2839f25d086af736a60e9eeb907d3b93b6e0e5",
    "execution": {
     "iopub.execute_input": "2023-11-10T18:18:35.306290Z",
     "iopub.status.busy": "2023-11-10T18:18:35.305921Z",
     "iopub.status.idle": "2023-11-10T18:18:42.662390Z",
     "shell.execute_reply": "2023-11-10T18:18:42.661221Z"
    },
    "papermill": {
     "duration": 7.364794,
     "end_time": "2023-11-10T18:18:42.664788",
     "exception": false,
     "start_time": "2023-11-10T18:18:35.299994",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/opt/conda/lib/python3.10/site-packages/scipy/__init__.py:146: UserWarning: A NumPy version >=1.16.5 and <1.23.0 is required for this version of SciPy (detected version 1.23.5\n",
      "  warnings.warn(f\"A NumPy version >={np_minversion} and <{np_maxversion}\"\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>posting_id</th>\n",
       "      <th>image</th>\n",
       "      <th>image_phash</th>\n",
       "      <th>title</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>test_2255846744</td>\n",
       "      <td>../input/shopee-product-matching/test_images/0006c8e5462ae52167402bac1c2e916e.jpg</td>\n",
       "      <td>ecc292392dc7687a</td>\n",
       "      <td>Edufuntoys - CHARACTER PHONE ada lampu dan musik/ mainan telepon</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>test_3588702337</td>\n",
       "      <td>../input/shopee-product-matching/test_images/0007585c4d0f932859339129f709bfdc.jpg</td>\n",
       "      <td>e9968f60d2699e2c</td>\n",
       "      <td>(Beli 1 Free Spatula) Masker Komedo | Blackheads Mask 10gr by Flawless Go Surabaya | Flawless.Go</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>test_4015706929</td>\n",
       "      <td>../input/shopee-product-matching/test_images/0008377d3662e83ef44e1881af38b879.jpg</td>\n",
       "      <td>ba81c17e3581cabe</td>\n",
       "      <td>READY Lemonilo Mie instant sehat kuah dan goreng</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        posting_id  \\\n",
       "0  test_2255846744   \n",
       "1  test_3588702337   \n",
       "2  test_4015706929   \n",
       "\n",
       "                                                                               image  \\\n",
       "0  ../input/shopee-product-matching/test_images/0006c8e5462ae52167402bac1c2e916e.jpg   \n",
       "1  ../input/shopee-product-matching/test_images/0007585c4d0f932859339129f709bfdc.jpg   \n",
       "2  ../input/shopee-product-matching/test_images/0008377d3662e83ef44e1881af38b879.jpg   \n",
       "\n",
       "        image_phash  \\\n",
       "0  ecc292392dc7687a   \n",
       "1  e9968f60d2699e2c   \n",
       "2  ba81c17e3581cabe   \n",
       "\n",
       "                                                                                              title  \n",
       "0                                  Edufuntoys - CHARACTER PHONE ada lampu dan musik/ mainan telepon  \n",
       "1  (Beli 1 Free Spatula) Masker Komedo | Blackheads Mask 10gr by Flawless Go Surabaya | Flawless.Go  \n",
       "2                                                  READY Lemonilo Mie instant sehat kuah dan goreng  "
      ]
     },
     "execution_count": 1,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "COMPUTE_CV = False\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import torch\n",
    "import sys\n",
    "sys.path.append(\"../input/shopee-utils\")\n",
    "import helpers\n",
    "DATA_PATH = \"../input/shopee-product-matching/\"\n",
    "# DATA_PATH = \"../input/\"\n",
    "\n",
    "train = pd.read_csv(DATA_PATH + \"train.csv\")\n",
    "train[\"target\"] = train.label_group.map(train.groupby(\"label_group\").posting_id.agg(\"unique\").to_dict())\n",
    "train[\"target\"] = train[\"target\"].apply(lambda x: \" \".join(x))\n",
    "train[\"image\"] = DATA_PATH + \"train_images/\" + train[\"image\"]\n",
    "\n",
    "if COMPUTE_CV == False:\n",
    "    ttrain=train\n",
    "    train = pd.read_csv(DATA_PATH + \"test.csv\")\n",
    "    train[\"image\"] = DATA_PATH + \"test_images/\" + train[\"image\"]\n",
    "    \n",
    "\n",
    "train.head()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "538cc84d",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:18:42.676885Z",
     "iopub.status.busy": "2023-11-10T18:18:42.676532Z",
     "iopub.status.idle": "2023-11-10T18:18:52.133205Z",
     "shell.execute_reply": "2023-11-10T18:18:52.132100Z"
    },
    "papermill": {
     "duration": 9.465334,
     "end_time": "2023-11-10T18:18:52.135463",
     "exception": false,
     "start_time": "2023-11-10T18:18:42.670129",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Building Model Backbone for eca_nfnet_l0 model\n"
     ]
    },
    {
     "data": {
      "application/vnd.jupyter.widget-view+json": {
       "model_id": "568df850d6f2407f94985b4a4cc4d29d",
       "version_major": 2,
       "version_minor": 0
      },
      "text/plain": [
       "  0%|          | 0/1 [00:00<?, ?it/s]"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Our image embeddings shape is (3, 11014)\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "\n",
    "import sys, os\n",
    "\n",
    "import pandas as pd\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "from PIL import Image\n",
    "\n",
    "import albumentations\n",
    "\n",
    "from albumentations.pytorch.transforms import ToTensorV2\n",
    "import torch\n",
    "from torch import nn\n",
    "from torch.nn import Parameter\n",
    "from torch.nn import functional as F\n",
    "import torchvision.transforms as transforms\n",
    "import torchvision.datasets as datasets\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.autograd import Variable\n",
    "from torch.utils.data.dataset import Dataset\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "import math\n",
    "import cv2\n",
    "import timm\n",
    "import os\n",
    "import random\n",
    "import gc\n",
    "\n",
    "from sklearn.preprocessing import normalize\n",
    "\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.model_selection import GroupKFold\n",
    "from sklearn.neighbors import NearestNeighbors\n",
    "from tqdm.notebook import tqdm\n",
    "\n",
    "\n",
    "from custom_scheduler import ShopeeScheduler\n",
    "from custom_activation import replace_activations, Mish\n",
    "from custom_optimizer import Ranger\n",
    "\n",
    "\n",
    "import warnings\n",
    "\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "\n",
    "class CFG:\n",
    "    # data augmentation\n",
    "    IMG_SIZE = 512\n",
    "    MEAN = [0.485, 0.456, 0.406]\n",
    "    STD = [0.229, 0.224, 0.225]\n",
    "\n",
    "    SEED = 2023\n",
    "\n",
    "    # data split\n",
    "    N_SPLITS = 5\n",
    "    TEST_FOLD = 0\n",
    "    VALID_FOLD = 1\n",
    "\n",
    "    EPOCHS = 8\n",
    "    BATCH_SIZE = 8\n",
    "\n",
    "    NUM_WORKERS = 4\n",
    "    DEVICE = \"cuda:0\"\n",
    "\n",
    "    CLASSES = 11014  #!!!!!!!!!!!!!\n",
    "    SCALE = 30\n",
    "    MARGIN = 0.6\n",
    "    \n",
    "    SCHEDULER_PARAMS = {\n",
    "            \"lr_start\": 1e-5,\n",
    "            \"lr_max\": 1e-5 * 32,\n",
    "            \"lr_min\": 1e-6,\n",
    "            \"lr_ramp_ep\": 5,\n",
    "            \"lr_sus_ep\": 0,\n",
    "            \"lr_decay\": 0.8,\n",
    "        }\n",
    "    \n",
    "\n",
    "    MODEL_NAME = \"eca_nfnet_l0\"\n",
    "    FC_DIM = 512\n",
    "    MODEL_PATH = f\"../input/shopee-models/{MODEL_NAME}_arc_face_epoch_{EPOCHS}_bs_{BATCH_SIZE}_margin_{MARGIN}.pt\"\n",
    "    FEAT_PATH = f\"../input/shopee-embeddings/{MODEL_NAME}_arcface.npy\"\n",
    "\n",
    "\n",
    "def get_train_transforms():\n",
    "    return albumentations.Compose(\n",
    "        [\n",
    "            albumentations.Resize(CFG.IMG_SIZE, CFG.IMG_SIZE, always_apply=True),\n",
    "            albumentations.HorizontalFlip(p=0.5),\n",
    "            albumentations.VerticalFlip(p=0.5),\n",
    "            albumentations.Rotate(limit=120, p=0.8),\n",
    "            albumentations.RandomBrightness(limit=(0.09, 0.6), p=0.5),\n",
    "            albumentations.Normalize(mean=CFG.MEAN, std=CFG.STD),\n",
    "            ToTensorV2(p=1.0),\n",
    "        ]\n",
    "    )\n",
    "\n",
    "\n",
    "def get_valid_transforms():\n",
    "    return albumentations.Compose(\n",
    "        [\n",
    "            albumentations.Resize(CFG.IMG_SIZE, CFG.IMG_SIZE, always_apply=True),\n",
    "            albumentations.Normalize(mean=CFG.MEAN, std=CFG.STD),\n",
    "            ToTensorV2(p=1.0),\n",
    "        ]\n",
    "    )\n",
    "\n",
    "\n",
    "def get_test_transforms():\n",
    "    return albumentations.Compose(\n",
    "        [\n",
    "            albumentations.Resize(CFG.IMG_SIZE, CFG.IMG_SIZE, always_apply=True),\n",
    "            albumentations.Normalize(mean=CFG.MEAN, std=CFG.STD),\n",
    "            ToTensorV2(p=1.0),\n",
    "        ]\n",
    "    )\n",
    "\n",
    "\n",
    "class ShopeeImageDataset(Dataset):\n",
    "    \"\"\"for training\"\"\"\n",
    "\n",
    "    def __init__(self, df, transform=None, train=True):\n",
    "        self.df = df\n",
    "        self.img_path = df[\"image\"].values\n",
    "        self.transform = transform\n",
    "        self.train = train\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        image = cv2.imread(self.img_path[index])\n",
    "        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n",
    "\n",
    "        if self.transform:\n",
    "            augmented = self.transform(image=image)\n",
    "            image = augmented[\"image\"]\n",
    "            \n",
    "        if self.train:\n",
    "            label = self.df.label_group[index]\n",
    "            label = torch.tensor(label).long()\n",
    "            return {\"image\": image, \"label\": label}\n",
    "        else:\n",
    "            label = torch.tensor(1)\n",
    "            return image, label\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.df)\n",
    "\n",
    "\n",
    "class ArcMarginProduct(nn.Module):\n",
    "    r\"\"\"Implement of large margin arc distance: :\n",
    "    Args:\n",
    "        in_features: size of each input sample\n",
    "        out_features: size of each output sample\n",
    "        s: norm of input feature\n",
    "        m: margin\n",
    "        cos(theta + m)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self, in_features, out_features, s=30.0, m=0.50, easy_margin=False, ls_eps=0.0\n",
    "    ):\n",
    "        super(ArcMarginProduct, self).__init__()\n",
    "        self.in_features = in_features\n",
    "        self.out_features = out_features\n",
    "        self.s = s\n",
    "        self.m = m\n",
    "        self.ls_eps = ls_eps  # label smoothing\n",
    "        self.weight = Parameter(torch.FloatTensor(out_features, in_features))\n",
    "        nn.init.xavier_uniform_(self.weight)\n",
    "\n",
    "        self.easy_margin = easy_margin\n",
    "        self.cos_m = math.cos(m)\n",
    "        self.sin_m = math.sin(m)\n",
    "        self.th = math.cos(math.pi - m)\n",
    "        self.mm = math.sin(math.pi - m) * m\n",
    "\n",
    "    def forward(self, input, label):\n",
    "        # --------------------------- cos(theta) & phi(theta) ---------------------------\n",
    "        cosine = F.linear(F.normalize(input), F.normalize(self.weight))\n",
    "        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))\n",
    "        phi = cosine * self.cos_m - sine * self.sin_m\n",
    "        if self.easy_margin:\n",
    "            phi = torch.where(cosine > 0, phi, cosine)\n",
    "        else:\n",
    "            phi = torch.where(cosine > self.th, phi, cosine - self.mm)\n",
    "        # --------------------------- convert label to one-hot ---------------------------\n",
    "        # one_hot = torch.zeros(cosine.size(), requires_grad=True, device='cuda')\n",
    "        one_hot = torch.zeros(cosine.size(), device=CFG.DEVICE)\n",
    "        one_hot.scatter_(1, label.view(-1, 1).long(), 1)\n",
    "        if self.ls_eps > 0:\n",
    "            one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features\n",
    "        # -------------torch.where(out_i = {x_i if condition_i else y_i) -------------\n",
    "        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)\n",
    "        output *= self.s\n",
    "\n",
    "        return output, nn.CrossEntropyLoss()(output, label)\n",
    "\n",
    "\n",
    "class ShopeeModel(nn.Module):\n",
    "    def __init__(\n",
    "        self,\n",
    "        n_classes=CFG.CLASSES,\n",
    "        model_name=CFG.MODEL_NAME,\n",
    "        fc_dim=CFG.FC_DIM,\n",
    "        margin=CFG.MARGIN,\n",
    "        scale=CFG.SCALE,\n",
    "        use_fc=True,\n",
    "        pretrained=True,\n",
    "    ):\n",
    "        super(ShopeeModel, self).__init__()\n",
    "        print(\"Building Model Backbone for {} model\".format(model_name))\n",
    "\n",
    "        self.backbone = timm.create_model(model_name, pretrained=pretrained)\n",
    "\n",
    "        if \"efficientnet\" in model_name:\n",
    "            final_in_features = self.backbone.classifier.in_features\n",
    "            self.backbone.classifier = nn.Identity()\n",
    "            self.backbone.global_pool = nn.Identity()\n",
    "\n",
    "        elif \"resnet\" in model_name:\n",
    "            final_in_features = self.backbone.fc.in_features\n",
    "            self.backbone.fc = nn.Identity()\n",
    "            self.backbone.global_pool = nn.Identity()\n",
    "\n",
    "        elif \"resnext\" in model_name:\n",
    "            final_in_features = self.backbone.fc.in_features\n",
    "            self.backbone.fc = nn.Identity()\n",
    "            self.backbone.global_pool = nn.Identity()\n",
    "\n",
    "        elif \"nfnet\" in model_name:\n",
    "            final_in_features = self.backbone.head.fc.in_features\n",
    "            self.backbone.head.fc = nn.Identity()\n",
    "            self.backbone.head.global_pool = nn.Identity()\n",
    "\n",
    "        self.pooling = nn.AdaptiveAvgPool2d(1)\n",
    "\n",
    "        self.use_fc = use_fc\n",
    "\n",
    "        if use_fc:\n",
    "            self.dropout = nn.Dropout(p=0.0)\n",
    "            self.fc = nn.Linear(final_in_features, fc_dim)\n",
    "            self.bn = nn.BatchNorm1d(fc_dim)\n",
    "            self._init_params()\n",
    "            final_in_features = fc_dim\n",
    "\n",
    "        self.final = ArcMarginProduct(final_in_features, n_classes, s=scale, m=margin)\n",
    "\n",
    "    def _init_params(self):\n",
    "        nn.init.xavier_normal_(self.fc.weight)\n",
    "        nn.init.constant_(self.fc.bias, 0)\n",
    "        nn.init.constant_(self.bn.weight, 1)\n",
    "        nn.init.constant_(self.bn.bias, 0)\n",
    "\n",
    "    def forward(self, image, label):\n",
    "        feature = self.extract_feat(image)\n",
    "        logits = self.final(feature, label)\n",
    "        return logits\n",
    "\n",
    "    def extract_feat(self, x):\n",
    "        batch_size = x.shape[0]\n",
    "        x = self.backbone(x)\n",
    "        x = self.pooling(x).view(batch_size, -1)\n",
    "\n",
    "        if self.use_fc:\n",
    "            x = self.dropout(x)\n",
    "            x = self.fc(x)\n",
    "            x = self.bn(x)\n",
    "        return x\n",
    "\n",
    "\n",
    "def seed_everything(seed):\n",
    "    random.seed(seed)\n",
    "    os.environ[\"PYTHONHASHSEED\"] = str(seed)\n",
    "    np.random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = True  # set True to be faster\n",
    "    \n",
    "def get_test_embeddings(test_df):\n",
    "    model = ShopeeModel(pretrained=False)\n",
    "    model.eval()\n",
    "    model = replace_activations(model, torch.nn.SiLU, Mish()) ###\n",
    "    model.load_state_dict(torch.load(CFG.MODEL_PATH))\n",
    "    model = model.to(CFG.DEVICE)\n",
    "\n",
    "    image_dataset = ShopeeImageDataset(test_df,transform=get_test_transforms(),train=False)\n",
    "    image_loader = torch.utils.data.DataLoader(\n",
    "        image_dataset,\n",
    "        batch_size=CFG.BATCH_SIZE,\n",
    "        pin_memory=True,\n",
    "        num_workers = CFG.NUM_WORKERS,\n",
    "        drop_last=False\n",
    "    )\n",
    "\n",
    "    embeds = []\n",
    "    with torch.no_grad():\n",
    "        for img,label in tqdm(image_loader): \n",
    "            img = img.cuda()\n",
    "            label = label.cuda()\n",
    "            feat,_ = model(img,label)\n",
    "            image_embeddings = feat.detach().cpu().numpy()\n",
    "            embeds.append(image_embeddings)\n",
    "    \n",
    "    del model\n",
    "    image_embeddings = np.concatenate(embeds)\n",
    "    print(f'Our image embeddings shape is {image_embeddings.shape}')\n",
    "    del embeds\n",
    "    gc.collect()\n",
    "    return image_embeddings\n",
    "\n",
    "img_embs = get_test_embeddings(\n",
    "    train\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "210dcfd0",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:18:52.146950Z",
     "iopub.status.busy": "2023-11-10T18:18:52.146485Z",
     "iopub.status.idle": "2023-11-10T18:19:30.570330Z",
     "shell.execute_reply": "2023-11-10T18:19:30.569442Z"
    },
    "papermill": {
     "duration": 38.432277,
     "end_time": "2023-11-10T18:19:30.572527",
     "exception": false,
     "start_time": "2023-11-10T18:18:52.140250",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "cuda:0\n",
      "Building Model Backbone for ../input/sentence-transformer-models/paraphrase-xlm-r-multilingual-v1/0_Transformer model\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "get_bert_embeddings: 100%|████████████████████████| 2/2 [00:00<00:00, 28.72it/s]\n"
     ]
    }
   ],
   "source": [
    "# %% [markdown]\n",
    "# # Shopee Training Paraphrase XLM\n",
    "\n",
    "# %%\n",
    "import os, sys\n",
    "import gc\n",
    "import math\n",
    "import random\n",
    "from tqdm import tqdm\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "from sklearn.preprocessing import LabelEncoder\n",
    "from sklearn.model_selection import GroupKFold\n",
    "from sklearn.neighbors import NearestNeighbors\n",
    "\n",
    "import torch\n",
    "from torch import nn\n",
    "import torch.nn.functional as F\n",
    "from transformers import AutoTokenizer, AutoModel\n",
    "\n",
    "sys.path.append(\"../input/utils\")\n",
    "import get_KNN, eval_preds\n",
    "\n",
    "import warnings\n",
    "\n",
    "def init():\n",
    "    warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "    os.environ[\"TOKENIZERS_PARALLELISM\"] = \"true\"\n",
    "    seed_everything(CFG.seed)\n",
    "\n",
    "\n",
    "# %%\n",
    "\n",
    "\n",
    "class CFG:\n",
    "    compute_cv = True  # set False to train model for submission\n",
    "\n",
    "    ### BERT\n",
    "#     bert_model_name = \"../input/shopee-models/paraphrase-xlm-r-multilingual-v1\"\n",
    "    bert_model_name = \"../input/sentence-transformer-models/paraphrase-xlm-r-multilingual-v1/0_Transformer\" #!!!!!\n",
    "    max_length = 128\n",
    "\n",
    "    ### ArcFace\n",
    "    scale = 30\n",
    "\n",
    "    margin = 0.6\n",
    "    fc_dim = 768\n",
    "    seed = 2023\n",
    "    classes = 11014\n",
    "\n",
    "    # groupkfold\n",
    "    N_SPLITS = 5\n",
    "    TEST_FOLD = 0\n",
    "    VALID_FOLD = 1\n",
    "\n",
    "    ### Training\n",
    "    batch_size = 16\n",
    "    accum_iter = 1  # 1 if use_sam = True\n",
    "    epochs = 8\n",
    "    min_save_epoch = epochs // 3\n",
    "    use_sam = True  # SAM (Sharpness-Aware Minimization for Efficiently Improving Generalization)\n",
    "    use_amp = True  # Automatic Mixed Precision\n",
    "    num_workers = 2  # On Windows, set 0 or export train_fn and TitleDataset as .py files for faster training.\n",
    "    device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "    print(device)\n",
    "\n",
    "    ### NearestNeighbors\n",
    "    bert_knn = 50\n",
    "    bert_knn_threshold = 0.4  # Cosine distance threshold\n",
    "\n",
    "    ### GradualWarmupSchedulerV2（lr_start -> lr_max -> lr_min）\n",
    "    scheduler_params = {\n",
    "        \"lr_start\": 7.5e-6,\n",
    "        \"lr_max\": 1e-4,\n",
    "        \"lr_min\": 2.74e-5,  # 1.5e-5,\n",
    "    }\n",
    "    multiplier = scheduler_params[\"lr_max\"] / scheduler_params[\"lr_start\"]\n",
    "    eta_min = scheduler_params[\"lr_min\"]  # last minimum learning rate\n",
    "    freeze_epo = 0\n",
    "    warmup_epo = 2\n",
    "    cosine_epo = epochs - freeze_epo - warmup_epo\n",
    "\n",
    "    ### save_model_path\n",
    "#     save_model_path = f\"../input/shopee-models/{bert_model_name.rsplit('/', 1)[-1]}_epoch{epochs}-bs{batch_size}x{accum_iter}.pt\"\n",
    "    save_model_path = \"/kaggle/input/shopee-models/paraphrase-xlm-r-multilingual-v1_epoch8-bs16x1.pt\"\n",
    "\n",
    "\n",
    "# %%\n",
    "def seed_everything(seed):\n",
    "    random.seed(seed)\n",
    "    os.environ[\"PYTHONHASHSEED\"] = str(seed)\n",
    "    np.random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = True  # set True to be faster\n",
    "\n",
    "\n",
    "# %%\n",
    "### Dataset\n",
    "\n",
    "\n",
    "class TitleDataset(torch.utils.data.Dataset):\n",
    "    def __init__(self, df, text_column, label_column):\n",
    "        texts = df[text_column]\n",
    "        self.labels = df[label_column].values\n",
    "\n",
    "        self.titles = []\n",
    "        for title in texts:\n",
    "            title = title.encode(\"utf-8\").decode(\"unicode_escape\")\n",
    "            title = title.encode(\"ascii\", \"ignore\").decode(\"unicode_escape\")\n",
    "            title = title.lower()\n",
    "            self.titles.append(title)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.titles)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        text = self.titles[idx]\n",
    "        label = torch.tensor(self.labels[idx])\n",
    "        return text, label\n",
    "\n",
    "\n",
    "# %%\n",
    "### SAM Optimizer 2020/1/16\n",
    "# https://github.com/davda54/sam/blob/main/sam.py\n",
    "\n",
    "\n",
    "class SAM(torch.optim.Optimizer):\n",
    "    def __init__(self, params, base_optimizer, rho=0.05, **kwargs):\n",
    "        assert rho >= 0.0, f\"Invalid rho, should be non-negative: {rho}\"\n",
    "\n",
    "        defaults = dict(rho=rho, **kwargs)\n",
    "        super(SAM, self).__init__(params, defaults)\n",
    "\n",
    "        self.base_optimizer = base_optimizer(self.param_groups, **kwargs)\n",
    "        self.param_groups = self.base_optimizer.param_groups\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def first_step(self, zero_grad=False):\n",
    "        grad_norm = self._grad_norm()\n",
    "        for group in self.param_groups:\n",
    "            scale = group[\"rho\"] / (grad_norm + 1e-12)\n",
    "\n",
    "            for p in group[\"params\"]:\n",
    "                if p.grad is None:\n",
    "                    continue\n",
    "                e_w = p.grad * scale.to(p)\n",
    "                p.add_(e_w)  # climb to the local maximum \"w + e(w)\"\n",
    "                self.state[p][\"e_w\"] = e_w\n",
    "\n",
    "        if zero_grad:\n",
    "            self.zero_grad()\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def second_step(self, zero_grad=False):\n",
    "        for group in self.param_groups:\n",
    "            for p in group[\"params\"]:\n",
    "                if p.grad is None:\n",
    "                    continue\n",
    "                p.sub_(self.state[p][\"e_w\"])  # get back to \"w\" from \"w + e(w)\"\n",
    "\n",
    "        self.base_optimizer.step()  # do the actual \"sharpness-aware\" update\n",
    "\n",
    "        if zero_grad:\n",
    "            self.zero_grad()\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def step(self, closure=None):\n",
    "        assert (\n",
    "            closure is not None\n",
    "        ), \"Sharpness Aware Minimization requires closure, but it was not provided\"\n",
    "        closure = torch.enable_grad()(\n",
    "            closure\n",
    "        )  # the closure should do a full forward-backward pass\n",
    "\n",
    "        self.first_step(zero_grad=True)\n",
    "        closure()\n",
    "        self.second_step()\n",
    "\n",
    "    def _grad_norm(self):\n",
    "        shared_device = self.param_groups[0][\"params\"][\n",
    "            0\n",
    "        ].device  # put everything on the same device, in case of model parallelism\n",
    "        norm = torch.norm(\n",
    "            torch.stack(\n",
    "                [\n",
    "                    p.grad.norm(p=2).to(shared_device)\n",
    "                    for group in self.param_groups\n",
    "                    for p in group[\"params\"]\n",
    "                    if p.grad is not None\n",
    "                ]\n",
    "            ),\n",
    "            p=2,\n",
    "        )\n",
    "        return norm\n",
    "\n",
    "\n",
    "# %%\n",
    "### GradualWarmupScheduler\n",
    "# https://github.com/ildoonet/pytorch-gradual-warmup-lr\n",
    "\n",
    "from torch.optim.lr_scheduler import _LRScheduler\n",
    "from torch.optim.lr_scheduler import ReduceLROnPlateau\n",
    "\n",
    "\n",
    "class GradualWarmupScheduler(_LRScheduler):\n",
    "    \"\"\"Gradually warm-up(increasing) learning rate in optimizer.\n",
    "    Proposed in 'Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour'.\n",
    "    Args:\n",
    "        optimizer (Optimizer): Wrapped optimizer.\n",
    "        multiplier: target learning rate = base lr * multiplier if multiplier > 1.0. if multiplier = 1.0, lr starts from 0 and ends up with the base_lr.\n",
    "        total_epoch: target learning rate is reached at total_epoch, gradually\n",
    "        after_scheduler: after target_epoch, use this scheduler(eg. ReduceLROnPlateau)\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):\n",
    "        self.multiplier = multiplier\n",
    "        if self.multiplier < 1.0:\n",
    "            raise ValueError(\"multiplier should be greater thant or equal to 1.\")\n",
    "        self.total_epoch = total_epoch\n",
    "        self.after_scheduler = after_scheduler\n",
    "        self.finished = False\n",
    "        super(GradualWarmupScheduler, self).__init__(optimizer)\n",
    "\n",
    "    def get_lr(self):\n",
    "        if self.last_epoch > self.total_epoch:\n",
    "            if self.after_scheduler:\n",
    "                if not self.finished:\n",
    "                    self.after_scheduler.base_lrs = [\n",
    "                        base_lr * self.multiplier for base_lr in self.base_lrs\n",
    "                    ]\n",
    "                    self.finished = True\n",
    "                return self.after_scheduler.get_last_lr()\n",
    "            return [base_lr * self.multiplier for base_lr in self.base_lrs]\n",
    "\n",
    "        if self.multiplier == 1.0:\n",
    "            return [\n",
    "                base_lr * (float(self.last_epoch) / self.total_epoch)\n",
    "                for base_lr in self.base_lrs\n",
    "            ]\n",
    "        else:\n",
    "            return [\n",
    "                base_lr\n",
    "                * ((self.multiplier - 1.0) * self.last_epoch / self.total_epoch + 1.0)\n",
    "                for base_lr in self.base_lrs\n",
    "            ]\n",
    "\n",
    "    def step_ReduceLROnPlateau(self, metrics, epoch=None):\n",
    "        if epoch is None:\n",
    "            epoch = self.last_epoch + 1\n",
    "        self.last_epoch = (\n",
    "            epoch if epoch != 0 else 1\n",
    "        )  # ReduceLROnPlateau is called at the end of epoch, whereas others are called at beginning\n",
    "        if self.last_epoch <= self.total_epoch:\n",
    "            warmup_lr = [\n",
    "                base_lr\n",
    "                * ((self.multiplier - 1.0) * self.last_epoch / self.total_epoch + 1.0)\n",
    "                for base_lr in self.base_lrs\n",
    "            ]\n",
    "            for param_group, lr in zip(self.optimizer.param_groups, warmup_lr):\n",
    "                param_group[\"lr\"] = lr\n",
    "        else:\n",
    "            if epoch is None:\n",
    "                self.after_scheduler.step(metrics, None)\n",
    "            else:\n",
    "                self.after_scheduler.step(metrics, epoch - self.total_epoch)\n",
    "\n",
    "    def step(self, epoch=None, metrics=None):\n",
    "        if type(self.after_scheduler) != ReduceLROnPlateau:\n",
    "            if self.finished and self.after_scheduler:\n",
    "                if epoch is None:\n",
    "                    self.after_scheduler.step(None)\n",
    "                else:\n",
    "                    self.after_scheduler.step(epoch - self.total_epoch)\n",
    "                self._last_lr = self.after_scheduler.get_last_lr()\n",
    "            else:\n",
    "                return super(GradualWarmupScheduler, self).step(epoch)\n",
    "        else:\n",
    "            self.step_ReduceLROnPlateau(metrics, epoch)\n",
    "\n",
    "\n",
    "# %%\n",
    "### GradualWarmupSchedulerV2\n",
    "\n",
    "\n",
    "class GradualWarmupSchedulerV2(GradualWarmupScheduler):\n",
    "    \"\"\"Warmup scheduler whose ramp starts from lr = 0 when multiplier == 1.\n",
    "\n",
    "    Only `get_lr` differs from the parent: for `multiplier == 1.0` the\n",
    "    learning rate ramps linearly from 0 up to `base_lr` over `total_epoch`\n",
    "    epochs instead of holding `base_lr` constant during warmup.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, optimizer, multiplier, total_epoch, after_scheduler=None):\n",
    "        super(GradualWarmupSchedulerV2, self).__init__(\n",
    "            optimizer, multiplier, total_epoch, after_scheduler\n",
    "        )\n",
    "\n",
    "    def get_lr(self):\n",
    "        if self.last_epoch > self.total_epoch:\n",
    "            # Warmup finished: hand over to the after-scheduler (rescaling\n",
    "            # its base lrs exactly once), or stay at the warmed-up rate.\n",
    "            if self.after_scheduler:\n",
    "                if not self.finished:\n",
    "                    self.after_scheduler.base_lrs = [\n",
    "                        base_lr * self.multiplier for base_lr in self.base_lrs\n",
    "                    ]\n",
    "                    self.finished = True\n",
    "                return self.after_scheduler.get_lr()\n",
    "            return [base_lr * self.multiplier for base_lr in self.base_lrs]\n",
    "        if self.multiplier == 1.0:\n",
    "            # V2 behaviour: ramp linearly from 0 up to base_lr.\n",
    "            return [\n",
    "                base_lr * (float(self.last_epoch) / self.total_epoch)\n",
    "                for base_lr in self.base_lrs\n",
    "            ]\n",
    "        else:\n",
    "            # Ramp from base_lr up to multiplier * base_lr.\n",
    "            return [\n",
    "                base_lr\n",
    "                * ((self.multiplier - 1.0) * self.last_epoch / self.total_epoch + 1.0)\n",
    "                for base_lr in self.base_lrs\n",
    "            ]\n",
    "\n",
    "\n",
    "# %%\n",
    "### ArcFace\n",
    "class ArcMarginProduct(nn.Module):\n",
    "    def __init__(\n",
    "        self,\n",
    "        in_features,\n",
    "        out_features,\n",
    "        scale=30.0,\n",
    "        margin=0.50,\n",
    "        easy_margin=False,\n",
    "        ls_eps=0.0,\n",
    "    ):\n",
    "        super(ArcMarginProduct, self).__init__()\n",
    "        self.in_features = in_features\n",
    "        self.out_features = out_features\n",
    "        self.scale = scale\n",
    "        self.margin = margin\n",
    "        self.ls_eps = ls_eps  # label smoothing\n",
    "        self.weight = nn.Parameter(torch.FloatTensor(out_features, in_features))\n",
    "        nn.init.xavier_uniform_(self.weight)\n",
    "\n",
    "        self.easy_margin = easy_margin\n",
    "        self.cos_m = math.cos(margin)\n",
    "        self.sin_m = math.sin(margin)\n",
    "        self.th = math.cos(math.pi - margin)\n",
    "        self.mm = math.sin(math.pi - margin) * margin\n",
    "\n",
    "        self.criterion = nn.CrossEntropyLoss()\n",
    "\n",
    "    def forward(self, input, label):\n",
    "        # --------------------------- cos(theta) & phi(theta) ---------------------------\n",
    "        if CFG.use_amp:\n",
    "            cosine = F.linear(\n",
    "                F.normalize(input), F.normalize(self.weight)\n",
    "            ).float()  # if CFG.use_amp\n",
    "        else:\n",
    "            cosine = F.linear(F.normalize(input), F.normalize(self.weight))\n",
    "        sine = torch.sqrt(1.0 - torch.pow(cosine, 2))\n",
    "        phi = cosine * self.cos_m - sine * self.sin_m\n",
    "        if self.easy_margin:\n",
    "            phi = torch.where(cosine > 0, phi, cosine)\n",
    "        else:\n",
    "            phi = torch.where(cosine > self.th, phi, cosine - self.mm)\n",
    "        # --------------------------- convert label to one-hot ---------------------------\n",
    "        one_hot = torch.zeros(cosine.size(), device=CFG.device)\n",
    "        one_hot.scatter_(1, label.view(-1, 1).long(), 1)\n",
    "        if self.ls_eps > 0:\n",
    "            one_hot = (1 - self.ls_eps) * one_hot + self.ls_eps / self.out_features\n",
    "\n",
    "        output = (one_hot * phi) + ((1.0 - one_hot) * cosine)\n",
    "        output *= self.scale\n",
    "        return output, self.criterion(output, label)\n",
    "\n",
    "\n",
    "# %%\n",
    "### BERT\n",
    "\n",
    "\n",
    "# Mean Pooling - Take attention mask into account for correct averaging\n",
    "def mean_pooling(model_output, attention_mask):\n",
    "    token_embeddings = model_output[\n",
    "        0\n",
    "    ]  # First element of model_output contains all token embeddings\n",
    "    input_mask_expanded = (\n",
    "        attention_mask.unsqueeze(-1).expand(token_embeddings.size()).float()\n",
    "    )\n",
    "    sum_embeddings = torch.sum(token_embeddings * input_mask_expanded, 1)\n",
    "    sum_mask = torch.clamp(input_mask_expanded.sum(1), min=1e-9)\n",
    "    return sum_embeddings / sum_mask\n",
    "\n",
    "\n",
    "class ShopeeBertModel(nn.Module):\n",
    "    \"\"\"BERT text encoder with an optional FC head and an ArcFace margin head.\n",
    "\n",
    "    In training mode `forward` returns the ArcFace head's output for the\n",
    "    given labels; otherwise it returns the pooled text embeddings.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(\n",
    "        self,\n",
    "        n_classes=CFG.classes,\n",
    "        model_name=CFG.bert_model_name,\n",
    "        fc_dim=CFG.fc_dim,\n",
    "        margin=CFG.margin,\n",
    "        scale=CFG.scale,\n",
    "        use_fc=True,\n",
    "    ):\n",
    "        super(ShopeeBertModel, self).__init__()\n",
    "        print(\"Building Model Backbone for {} model\".format(model_name))\n",
    "\n",
    "        self.tokenizer = AutoTokenizer.from_pretrained(model_name)\n",
    "        self.backbone = AutoModel.from_pretrained(model_name).to(CFG.device)\n",
    "\n",
    "        # BERT-base hidden size; replaced by fc_dim when the FC head is used.\n",
    "        in_features = 768\n",
    "        self.use_fc = use_fc\n",
    "        if use_fc:\n",
    "            self.dropout = nn.Dropout(p=0.0)\n",
    "            self.classifier = nn.Linear(in_features, fc_dim)\n",
    "            self.bn = nn.BatchNorm1d(fc_dim)\n",
    "            self._init_params()\n",
    "            in_features = fc_dim\n",
    "\n",
    "        self.final = ArcMarginProduct(\n",
    "            in_features,\n",
    "            n_classes,\n",
    "            scale=scale,\n",
    "            margin=margin,\n",
    "            easy_margin=False,\n",
    "            ls_eps=0.0,\n",
    "        )\n",
    "\n",
    "    def _init_params(self):\n",
    "        \"\"\"Initialise the FC head's linear and batch-norm parameters.\"\"\"\n",
    "        nn.init.xavier_normal_(self.classifier.weight)\n",
    "        nn.init.constant_(self.classifier.bias, 0)\n",
    "        nn.init.constant_(self.bn.weight, 1)\n",
    "        nn.init.constant_(self.bn.bias, 0)\n",
    "\n",
    "    def set_training(self, training):\n",
    "        # NOTE(review): this writes nn.Module's own `training` flag, which\n",
    "        # train()/eval() also set -- confirm callers keep the two in sync.\n",
    "        self.training = training\n",
    "\n",
    "    def forward(self, texts, labels=torch.tensor([0])):\n",
    "        features = self.extract_features(texts)\n",
    "        if self.training:\n",
    "            # ArcFace head returns (scaled logits, cross-entropy loss).\n",
    "            return self.final(features, labels.to(CFG.device))\n",
    "        return features\n",
    "\n",
    "    def extract_features(self, texts):\n",
    "        \"\"\"Tokenise `texts` and return pooled (optionally projected) embeddings.\"\"\"\n",
    "        encoding = self.tokenizer(\n",
    "            texts,\n",
    "            padding=True,\n",
    "            truncation=True,\n",
    "            max_length=CFG.max_length,\n",
    "            return_tensors=\"pt\",\n",
    "        ).to(CFG.device)\n",
    "        embedding = self.backbone(\n",
    "            encoding[\"input_ids\"], attention_mask=encoding[\"attention_mask\"]\n",
    "        )\n",
    "        x = mean_pooling(embedding, encoding[\"attention_mask\"])\n",
    "        if self.use_fc:\n",
    "            x = self.bn(self.classifier(self.dropout(x)))\n",
    "        return x\n",
    "\n",
    "\n",
    "def get_test_embeddings(df, column, chunk=32):\n",
    "    \"\"\"Embed `df[column]` texts with the trained ShopeeBertModel.\n",
    "\n",
    "    Args:\n",
    "        df: DataFrame holding the text column.\n",
    "        column: name of the text column to embed.\n",
    "        chunk: inference batch size.\n",
    "\n",
    "    Returns:\n",
    "        torch.Tensor of shape (len(df), 768) on CFG.device.\n",
    "    \"\"\"\n",
    "    model = ShopeeBertModel()\n",
    "    model.eval()\n",
    "    model.load_state_dict(torch.load(CFG.save_model_path))\n",
    "    model.set_training(False)\n",
    "    model = model.to(CFG.device)\n",
    "\n",
    "    bert_embeddings = torch.zeros((df.shape[0], 768)).to(CFG.device)\n",
    "    # A plain range already covers every row: the slice assignment below\n",
    "    # clips the final (possibly short) batch. The old extra index\n",
    "    # `df.shape[0] - chunk` only re-embedded already-covered rows and\n",
    "    # misbehaved when len(df) < chunk, so it has been removed.\n",
    "    for i in tqdm(\n",
    "        range(0, df.shape[0], chunk),\n",
    "        desc=\"get_bert_embeddings\",\n",
    "        ncols=80,\n",
    "    ):\n",
    "        titles = []\n",
    "        for title in df[column][i : i + chunk].values:\n",
    "            try:\n",
    "                # Best-effort cleanup of escaped / non-ascii characters.\n",
    "                title = title.encode(\"utf-8\").decode(\"unicode_escape\")\n",
    "                title = title.encode(\"ascii\", \"ignore\").decode(\"unicode_escape\")\n",
    "            except Exception:\n",
    "                pass  # keep the raw title if decoding fails\n",
    "            titles.append(title.lower())\n",
    "\n",
    "        with torch.no_grad():\n",
    "            if CFG.use_amp:\n",
    "                with torch.cuda.amp.autocast():\n",
    "                    model_output = model(titles)\n",
    "            else:\n",
    "                model_output = model(titles)\n",
    "\n",
    "        bert_embeddings[i : i + chunk] = model_output\n",
    "\n",
    "    del model, titles, model_output\n",
    "    gc.collect()\n",
    "    torch.cuda.empty_cache()\n",
    "\n",
    "    return bert_embeddings\n",
    "\n",
    "# Embed every training title once up front.\n",
    "bert_embs = get_test_embeddings(train, \"title\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "8ee523d9",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:19:30.584710Z",
     "iopub.status.busy": "2023-11-10T18:19:30.583677Z",
     "iopub.status.idle": "2023-11-10T18:19:30.589561Z",
     "shell.execute_reply": "2023-11-10T18:19:30.588851Z"
    },
    "papermill": {
     "duration": 0.013652,
     "end_time": "2023-11-10T18:19:30.591413",
     "exception": false,
     "start_time": "2023-11-10T18:19:30.577761",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# Persist both embedding sets so later cells can reload without recomputing.\n",
    "np.save(\"/kaggle/working/img_embs.npy\", img_embs)\n",
    "np.save(\"/kaggle/working/bert_embs.npy\", bert_embs.cpu())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "3d4b95d2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:19:30.602133Z",
     "iopub.status.busy": "2023-11-10T18:19:30.601838Z",
     "iopub.status.idle": "2023-11-10T18:19:30.607721Z",
     "shell.execute_reply": "2023-11-10T18:19:30.607043Z"
    },
    "papermill": {
     "duration": 0.013303,
     "end_time": "2023-11-10T18:19:30.609534",
     "exception": false,
     "start_time": "2023-11-10T18:19:30.596231",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# Reload the cached embeddings as torch tensors.\n",
    "img_embs = torch.from_numpy(np.load(\"/kaggle/working/img_embs.npy\"))\n",
    "bert_embs = torch.from_numpy(np.load(\"/kaggle/working/bert_embs.npy\"))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "8c61220c",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:19:30.620337Z",
     "iopub.status.busy": "2023-11-10T18:19:30.620078Z",
     "iopub.status.idle": "2023-11-10T18:19:30.624509Z",
     "shell.execute_reply": "2023-11-10T18:19:30.623560Z"
    },
    "papermill": {
     "duration": 0.012137,
     "end_time": "2023-11-10T18:19:30.626595",
     "exception": false,
     "start_time": "2023-11-10T18:19:30.614458",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "torch.Size([3, 11014]) torch.Size([3, 768])\n"
     ]
    }
   ],
   "source": [
    "# Quick sanity check on the two embedding matrices.\n",
    "print(img_embs.shape, bert_embs.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "0c856cdf",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:19:30.637922Z",
     "iopub.status.busy": "2023-11-10T18:19:30.637414Z",
     "iopub.status.idle": "2023-11-10T18:19:30.683430Z",
     "shell.execute_reply": "2023-11-10T18:19:30.682482Z"
    },
    "papermill": {
     "duration": 0.053867,
     "end_time": "2023-11-10T18:19:30.685468",
     "exception": false,
     "start_time": "2023-11-10T18:19:30.631601",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "3\n",
      "removed 0 matches\n"
     ]
    }
   ],
   "source": [
    "# L2-normalise both embedding sets before blending them.\n",
    "img_embs = F.normalize(img_embs)\n",
    "bert_embs = F.normalize(bert_embs)\n",
    "\n",
    "set_size = len(img_embs)\n",
    "print(set_size)\n",
    "\n",
    "test_df = helpers.add_measurements(train)\n",
    "new_embs = helpers.blend_embs([img_embs, bert_embs], test_df)\n",
    "\n",
    "# Neighbour indices/distances over the blended embeddings, and the\n",
    "# (row, neighbour, value) pairs helpers.sorted_pairs derives from them.\n",
    "combined_inds, combined_dists = helpers.combined_distances(new_embs)\n",
    "pairs = helpers.sorted_pairs(combined_dists, combined_inds)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "faee9ed7",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:19:30.697922Z",
     "iopub.status.busy": "2023-11-10T18:19:30.697422Z",
     "iopub.status.idle": "2023-11-10T18:19:30.811044Z",
     "shell.execute_reply": "2023-11-10T18:19:30.810099Z"
    },
    "papermill": {
     "duration": 0.122315,
     "end_time": "2023-11-10T18:19:30.813429",
     "exception": false,
     "start_time": "2023-11-10T18:19:30.691114",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [],
   "source": [
    "# Bucket candidate neighbours (and their scores) per query row.\n",
    "groups = [[] for _ in range(set_size)]\n",
    "groups_p = [[] for _ in range(set_size)]\n",
    "for row, neighbour, score in pairs:\n",
    "    groups[row].append(neighbour)\n",
    "    groups_p[row].append(score)\n",
    "\n",
    "# Trim each group toward the target size distribution.\n",
    "# NOTE(review): `ttrain` is not defined anywhere in this section --\n",
    "# possibly a typo for `train`; confirm it exists in an earlier cell.\n",
    "for pos, size_pct in helpers.get_targets_shape(ttrain):\n",
    "    helpers.chisel(groups, groups_p, pos, int(size_pct * len(groups)))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7ab5f337",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2023-11-10T18:19:30.825273Z",
     "iopub.status.busy": "2023-11-10T18:19:30.824754Z",
     "iopub.status.idle": "2023-11-10T18:19:30.841974Z",
     "shell.execute_reply": "2023-11-10T18:19:30.841118Z"
    },
    "papermill": {
     "duration": 0.025463,
     "end_time": "2023-11-10T18:19:30.844137",
     "exception": false,
     "start_time": "2023-11-10T18:19:30.818674",
     "status": "completed"
    },
    "tags": [],
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>posting_id</th>\n",
       "      <th>matches</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>test_2255846744</td>\n",
       "      <td>test_2255846744 test_4015706929 test_3588702337</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>test_3588702337</td>\n",
       "      <td>test_3588702337 test_2255846744</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>test_4015706929</td>\n",
       "      <td>test_4015706929 test_2255846744 test_3588702337</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "</div>"
      ],
      "text/plain": [
       "        posting_id                                          matches\n",
       "0  test_2255846744  test_2255846744 test_4015706929 test_3588702337\n",
       "1  test_3588702337                  test_3588702337 test_2255846744\n",
       "2  test_4015706929  test_4015706929 test_2255846744 test_3588702337"
      ]
     },
     "execution_count": 9,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "# One space-separated posting_id string per query row's match group.\n",
    "matches = [' '.join(test_df.iloc[g].posting_id.to_list()) for g in groups]\n",
    "test_df['matches'] = matches\n",
    "\n",
    "# Write the submission file, then re-read it as a final sanity preview.\n",
    "test_df[['posting_id', 'matches']].to_csv('submission.csv', index=False)\n",
    "pd.read_csv('submission.csv').head()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.12"
  },
  "papermill": {
   "default_parameters": {},
   "duration": 62.486617,
   "end_time": "2023-11-10T18:19:34.423713",
   "environment_variables": {},
   "exception": null,
   "input_path": "__notebook__.ipynb",
   "output_path": "__notebook__.ipynb",
   "parameters": {},
   "start_time": "2023-11-10T18:18:31.937096",
   "version": "2.4.0"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {
    "state": {
     "18612b29bff34224ada6498cf7eaef04": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "ProgressStyleModel",
      "state": {
       "_model_module": "@jupyter-widgets/controls",
       "_model_module_version": "1.5.0",
       "_model_name": "ProgressStyleModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/base",
       "_view_module_version": "1.2.0",
       "_view_name": "StyleView",
       "bar_color": null,
       "description_width": ""
      }
     },
     "1bdbb191ed0249e095e3ed45c4cc5483": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "DescriptionStyleModel",
      "state": {
       "_model_module": "@jupyter-widgets/controls",
       "_model_module_version": "1.5.0",
       "_model_name": "DescriptionStyleModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/base",
       "_view_module_version": "1.2.0",
       "_view_name": "StyleView",
       "description_width": ""
      }
     },
     "2df2aeff64ec4e92a50683b3cfaa7867": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "FloatProgressModel",
      "state": {
       "_dom_classes": [],
       "_model_module": "@jupyter-widgets/controls",
       "_model_module_version": "1.5.0",
       "_model_name": "FloatProgressModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/controls",
       "_view_module_version": "1.5.0",
       "_view_name": "ProgressView",
       "bar_style": "success",
       "description": "",
       "description_tooltip": null,
       "layout": "IPY_MODEL_af3e9b1becb54e0990481916f57e68b0",
       "max": 1.0,
       "min": 0.0,
       "orientation": "horizontal",
       "style": "IPY_MODEL_18612b29bff34224ada6498cf7eaef04",
       "value": 1.0
      }
     },
     "39f7810527d7455d954c46cf33308934": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "DescriptionStyleModel",
      "state": {
       "_model_module": "@jupyter-widgets/controls",
       "_model_module_version": "1.5.0",
       "_model_name": "DescriptionStyleModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/base",
       "_view_module_version": "1.2.0",
       "_view_name": "StyleView",
       "description_width": ""
      }
     },
     "568df850d6f2407f94985b4a4cc4d29d": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "HBoxModel",
      "state": {
       "_dom_classes": [],
       "_model_module": "@jupyter-widgets/controls",
       "_model_module_version": "1.5.0",
       "_model_name": "HBoxModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/controls",
       "_view_module_version": "1.5.0",
       "_view_name": "HBoxView",
       "box_style": "",
       "children": [
        "IPY_MODEL_fc0131b0d9014e3f9c0f96a6b3045698",
        "IPY_MODEL_2df2aeff64ec4e92a50683b3cfaa7867",
        "IPY_MODEL_e82e16dad23d44e69823225d2015189f"
       ],
       "layout": "IPY_MODEL_e7741c57b28f451a8dd362767f7c87b2"
      }
     },
     "6a06e24b0c4647b2b044684be733fb2b": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {
       "_model_module": "@jupyter-widgets/base",
       "_model_module_version": "1.2.0",
       "_model_name": "LayoutModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/base",
       "_view_module_version": "1.2.0",
       "_view_name": "LayoutView",
       "align_content": null,
       "align_items": null,
       "align_self": null,
       "border": null,
       "bottom": null,
       "display": null,
       "flex": null,
       "flex_flow": null,
       "grid_area": null,
       "grid_auto_columns": null,
       "grid_auto_flow": null,
       "grid_auto_rows": null,
       "grid_column": null,
       "grid_gap": null,
       "grid_row": null,
       "grid_template_areas": null,
       "grid_template_columns": null,
       "grid_template_rows": null,
       "height": null,
       "justify_content": null,
       "justify_items": null,
       "left": null,
       "margin": null,
       "max_height": null,
       "max_width": null,
       "min_height": null,
       "min_width": null,
       "object_fit": null,
       "object_position": null,
       "order": null,
       "overflow": null,
       "overflow_x": null,
       "overflow_y": null,
       "padding": null,
       "right": null,
       "top": null,
       "visibility": null,
       "width": null
      }
     },
     "af3e9b1becb54e0990481916f57e68b0": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {
       "_model_module": "@jupyter-widgets/base",
       "_model_module_version": "1.2.0",
       "_model_name": "LayoutModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/base",
       "_view_module_version": "1.2.0",
       "_view_name": "LayoutView",
       "align_content": null,
       "align_items": null,
       "align_self": null,
       "border": null,
       "bottom": null,
       "display": null,
       "flex": null,
       "flex_flow": null,
       "grid_area": null,
       "grid_auto_columns": null,
       "grid_auto_flow": null,
       "grid_auto_rows": null,
       "grid_column": null,
       "grid_gap": null,
       "grid_row": null,
       "grid_template_areas": null,
       "grid_template_columns": null,
       "grid_template_rows": null,
       "height": null,
       "justify_content": null,
       "justify_items": null,
       "left": null,
       "margin": null,
       "max_height": null,
       "max_width": null,
       "min_height": null,
       "min_width": null,
       "object_fit": null,
       "object_position": null,
       "order": null,
       "overflow": null,
       "overflow_x": null,
       "overflow_y": null,
       "padding": null,
       "right": null,
       "top": null,
       "visibility": null,
       "width": null
      }
     },
     "e7741c57b28f451a8dd362767f7c87b2": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {
       "_model_module": "@jupyter-widgets/base",
       "_model_module_version": "1.2.0",
       "_model_name": "LayoutModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/base",
       "_view_module_version": "1.2.0",
       "_view_name": "LayoutView",
       "align_content": null,
       "align_items": null,
       "align_self": null,
       "border": null,
       "bottom": null,
       "display": null,
       "flex": null,
       "flex_flow": null,
       "grid_area": null,
       "grid_auto_columns": null,
       "grid_auto_flow": null,
       "grid_auto_rows": null,
       "grid_column": null,
       "grid_gap": null,
       "grid_row": null,
       "grid_template_areas": null,
       "grid_template_columns": null,
       "grid_template_rows": null,
       "height": null,
       "justify_content": null,
       "justify_items": null,
       "left": null,
       "margin": null,
       "max_height": null,
       "max_width": null,
       "min_height": null,
       "min_width": null,
       "object_fit": null,
       "object_position": null,
       "order": null,
       "overflow": null,
       "overflow_x": null,
       "overflow_y": null,
       "padding": null,
       "right": null,
       "top": null,
       "visibility": null,
       "width": null
      }
     },
     "e82e16dad23d44e69823225d2015189f": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "HTMLModel",
      "state": {
       "_dom_classes": [],
       "_model_module": "@jupyter-widgets/controls",
       "_model_module_version": "1.5.0",
       "_model_name": "HTMLModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/controls",
       "_view_module_version": "1.5.0",
       "_view_name": "HTMLView",
       "description": "",
       "description_tooltip": null,
       "layout": "IPY_MODEL_6a06e24b0c4647b2b044684be733fb2b",
       "placeholder": "​",
       "style": "IPY_MODEL_1bdbb191ed0249e095e3ed45c4cc5483",
       "value": " 1/1 [00:04&lt;00:00,  4.08s/it]"
      }
     },
     "f4265e63d7284520ad0a76f2043e831c": {
      "model_module": "@jupyter-widgets/base",
      "model_module_version": "1.2.0",
      "model_name": "LayoutModel",
      "state": {
       "_model_module": "@jupyter-widgets/base",
       "_model_module_version": "1.2.0",
       "_model_name": "LayoutModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/base",
       "_view_module_version": "1.2.0",
       "_view_name": "LayoutView",
       "align_content": null,
       "align_items": null,
       "align_self": null,
       "border": null,
       "bottom": null,
       "display": null,
       "flex": null,
       "flex_flow": null,
       "grid_area": null,
       "grid_auto_columns": null,
       "grid_auto_flow": null,
       "grid_auto_rows": null,
       "grid_column": null,
       "grid_gap": null,
       "grid_row": null,
       "grid_template_areas": null,
       "grid_template_columns": null,
       "grid_template_rows": null,
       "height": null,
       "justify_content": null,
       "justify_items": null,
       "left": null,
       "margin": null,
       "max_height": null,
       "max_width": null,
       "min_height": null,
       "min_width": null,
       "object_fit": null,
       "object_position": null,
       "order": null,
       "overflow": null,
       "overflow_x": null,
       "overflow_y": null,
       "padding": null,
       "right": null,
       "top": null,
       "visibility": null,
       "width": null
      }
     },
     "fc0131b0d9014e3f9c0f96a6b3045698": {
      "model_module": "@jupyter-widgets/controls",
      "model_module_version": "1.5.0",
      "model_name": "HTMLModel",
      "state": {
       "_dom_classes": [],
       "_model_module": "@jupyter-widgets/controls",
       "_model_module_version": "1.5.0",
       "_model_name": "HTMLModel",
       "_view_count": null,
       "_view_module": "@jupyter-widgets/controls",
       "_view_module_version": "1.5.0",
       "_view_name": "HTMLView",
       "description": "",
       "description_tooltip": null,
       "layout": "IPY_MODEL_f4265e63d7284520ad0a76f2043e831c",
       "placeholder": "​",
       "style": "IPY_MODEL_39f7810527d7455d954c46cf33308934",
       "value": "100%"
      }
     }
    },
    "version_major": 2,
    "version_minor": 0
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}