{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "override USE_NET <class 'src.networks.ExpTripletNetwork'>\n",
      "override DATASET_PROC_METHOD_TRAIN Rescale\n",
      "override DATASET_PROC_METHOD_VAL Rescale\n",
      "override MAX_CATEGORY_NUM 64\n",
      "override IMAGE_EMBED_SIZE 512\n",
      "override NEGATIVE_SAMPLE_WITH_TYPE True\n",
      "override LEARNED_FIELD_EMBED False\n",
      "override LEARNED_FIELD_BIAS False\n",
      "override TRIPLET_MARGIN 0.2\n",
      "override LEARNED_METRICS False\n",
      "override WEIGHT_TRIPLET 1.0\n",
      "override WEIGHT_L1_MASK 0.1\n",
      "override WEIGHT_L2_GENERAL_EMB 0.1\n",
      "override USE_PRETRAINED_WORD_EMBEDDING False\n",
      "override WORD_EMBED_SIZE 300\n",
      "override MAX_VOCAB_SIZE 300\n",
      "override OUTFIT_NAME_PAD_NUM 10\n",
      "override NUM_EPOCH 70\n",
      "override LEARNING_RATE 0.0001\n",
      "override LEARNING_RATE_DECAY 0.95\n",
      "override BATCH_SIZE 64\n",
      "override SAVE_EVERY_STEPS 10000\n",
      "override SAVE_EVERY_EPOCHS 1\n",
      "override VAL_WHILE_TRAIN True\n",
      "override VAL_FASHION_COMP_FILE fashion_compatibility_small.txt\n",
      "override VAL_FITB_FILE fill_in_blank_test_small.json\n",
      "override VAL_BATCH_SIZE 8\n",
      "override VAL_EVERY_STEPS 1000\n",
      "override VAL_EVERY_EPOCHS 1\n",
      "override VAL_START_EPOCH 1\n",
      "override device cuda:0\n",
      "override TRAIN_DIR runs/src.conf.fixm_dist/11-05 17:52:53\n",
      "override VAL_DIR runs/src.conf.fixm_dist/11-05 17:52:53\n",
      "override MODEL_NAME src.conf.fixm_dist\n"
     ]
    }
   ],
   "source": [
     "# Imports & experiment configuration — run first on a fresh kernel.\n",
     "import torch\n",
     "import torch.utils.data\n",
     "from src.const import base_path\n",
     "import numpy as np\n",
     "import cv2\n",
     "from torchvision import transforms\n",
     "import matplotlib.pyplot as plt\n",
     "import pandas as pd\n",
     "from skimage import io, transform\n",
     "import skimage\n",
     "from src import const\n",
     "import json\n",
     "import os\n",
     "import nltk\n",
     "from src.utils import load_json, build_vocab, Vocab\n",
     "# NOTE(review): these star imports supply PolyvoreTripletDataset and the\n",
     "# benchmark dataset classes used below; kept as-is, but explicit imports\n",
     "# would make the dependencies visible.\n",
     "from src.base_networks import *\n",
     "from src.networks import *\n",
     "from src.dataset import *\n",
     "from torch import nn\n",
     "import torchvision\n",
     "from torch.nn import functional as F\n",
     "from src.utils import merge_const\n",
     "from sklearn.metrics import roc_auc_score\n",
     "from collections import defaultdict\n",
     "import random\n",
     "# Apply the fixm_dist experiment config (prints the overrides above), then\n",
     "# override a few knobs for local CPU exploration.\n",
     "merge_const('src.conf.fixm_dist')\n",
     "const.BATCH_SIZE = 2\n",
     "const.device = 'cpu'\n",
     "# Scratch object bound to `self` — presumably so method bodies can be pasted\n",
     "# into cells unchanged; TODO confirm it is still needed.\n",
     "class _(object):\n",
     "    pass\n",
     "self = _()\n",
     "# Evaluate against the full compatibility file instead of the *_small one\n",
     "# set by the merged config.\n",
     "const.VAL_FASHION_COMP_FILE = 'fashion_compatibility_prediction.txt'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 210,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Widen pandas display limits so the wide count tables below are inspectable.\n",
     "pd.set_option('display.max_columns',100)\n",
     "pd.set_option('display.max_rows',1000)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 48,
   "metadata": {},
   "outputs": [],
   "source": [
    "const.MAX_CATEGORY_NUM = 1000"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 96,
   "metadata": {
    "scrolled": false
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "building positive pairs...\n",
      "Fashion Compatibility Test, Use File: /home/hzy/datasets/polyvore/fashion_compatibility_prediction.txt\n",
      "FITB use file: /home/hzy/datasets/polyvore/fill_in_blank_test_small.json\n"
     ]
    }
   ],
   "source": [
    "train_set = load_json(os.path.join(const.base_path, 'train_no_dup.json'))\n",
    "valid_set = load_json(os.path.join(const.base_path, 'valid_no_dup.json'))\n",
    "test_set = load_json(os.path.join(const.base_path, 'test_no_dup.json'))\n",
    "vocab = build_vocab(train_set)\n",
    "train_dataset = PolyvoreTripletDataset(train_set, const.DATASET_PROC_METHOD_TRAIN, vocab)\n",
    "dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=5)\n",
    "comp_dataset = CompatibilityBenchmarkDataset(const.DATASET_PROC_METHOD_VAL, test_set)\n",
    "fitb_dataset = FITBBenchmarkDataset(const.DATASET_PROC_METHOD_VAL, test_set)\n",
    "sample = iter(dataloader).next()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_cateid(dataset, i, j):\n",
    "    return dataset.to_real_cateid(dataset.js[i]['items'][j]['categoryid'])\n",
    "cate_pair = [(get_cateid(train_dataset, i, j1), get_cateid(train_dataset, i, j2)) for i, j1, j2 in train_dataset.pos_pairs]\n",
    "cate_df = pd.DataFrame(cate_pair, columns=['c1', 'c2'])\n",
    "cnt_cate = cate_df.groupby(['c1', 'c2']).size()\n",
    "cnt_cate = cnt_cate.sort_values(ascending=False)\n",
    "cnt = defaultdict(dict)\n",
    "for cate1, cate2 in cate_pair:\n",
    "    if cate2 in cnt[cate1]:\n",
    "        cnt[cate1][cate2] += 1\n",
    "    else:\n",
    "        cnt[cate1][cate2] = 1"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 99,
   "metadata": {},
   "outputs": [],
   "source": [
    "outfit_cate_num = []\n",
    "for outfit in train_dataset.js:\n",
    "    items = outfit['items']\n",
    "    tmp = []\n",
    "    for i in range(len(items)):\n",
    "        for j in range(len(items)):\n",
    "            if (i != j):\n",
    "                cate1 = train_dataset.to_real_cateid(items[i]['categoryid'])\n",
    "                cate2 = train_dataset.to_real_cateid(items[j]['categoryid'])\n",
    "                if cate2 in cnt[cate1]:\n",
    "                    tmp.append(cnt[cate1][cate2])\n",
    "                else:\n",
    "                    tmp.append(0)\n",
    "    outfit_cate_num.append(tmp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 223,
   "metadata": {},
   "outputs": [],
   "source": [
    "comp_cate_num = []\n",
    "for outfit_cate in comp_dataset.cates:\n",
    "    tmp = []\n",
    "    for i in range(len(outfit_cate)):\n",
    "        for j in range(len(outfit_cate)):\n",
    "             if (i != j):\n",
    "                cate1 = outfit_cate[i]\n",
    "                cate2 = outfit_cate[j]\n",
    "                if cate2 in cnt[cate1]:\n",
    "                    tmp.append(cnt[cate1][cate2])\n",
    "                else:\n",
    "                    tmp.append(0)\n",
    "    comp_cate_num.append(tmp)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 312,
   "metadata": {},
   "outputs": [],
   "source": [
    "def convert_cate_num(cate_num):\n",
    "    ret = []\n",
    "    for line in cate_num:\n",
    "        line = np.array(line)\n",
    "        ret.append([line.min()])\n",
    "    return np.array(ret)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 313,
   "metadata": {},
   "outputs": [],
   "source": [
    "def sample_n_negative(dataset, n):\n",
    "    ret = []\n",
    "    for i in range(n):\n",
    "        nums = np.random.choice([4, 5, 6, 7, 8])\n",
    "        tmp = []\n",
    "        for j in range(nums):\n",
    "            exists = []\n",
    "            i1 = np.random.randint(len(dataset.js))\n",
    "            while i1 in exists:\n",
    "                i1 = np.random.randint(len(dataset.js))\n",
    "            j1= np.random.choice(range(len(dataset.js[i1]['items'])))\n",
    "            exists.append(i1)\n",
    "            tmp.append(get_cateid(dataset, i1, j1))\n",
    "        ret.append(tmp)\n",
    "    return ret\n",
    "\n",
    "def parse_cate_num(cates, cnt):\n",
    "    cate_num = []\n",
    "    for outfit_cate in negative_cates:\n",
    "        tmp = []\n",
    "        for i in range(len(outfit_cate)):\n",
    "            for j in range(len(outfit_cate)):\n",
    "                 if (i != j):\n",
    "                    cate1 = outfit_cate[i]\n",
    "                    cate2 = outfit_cate[j]\n",
    "                    if cate2 in cnt[cate1]:\n",
    "                        tmp.append(cnt[cate1][cate2])\n",
    "                    else:\n",
    "                        tmp.append(0)\n",
    "        cate_num.append(tmp)\n",
    "    return cate_num"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 314,
   "metadata": {},
   "outputs": [],
   "source": [
    "negative_cates = sample_n_negative(train_dataset, len(positive_cates))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 315,
   "metadata": {},
   "outputs": [],
   "source": [
    "negative_cate_num = parse_cate_num(negative_cates, cnt)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
     "## XGBoost baseline: compatibility from category co-occurrence features"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 316,
   "metadata": {},
   "outputs": [],
   "source": [
    "import xgboost"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 317,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Collapse each outfit's pair counts to its minimum-co-occurrence feature.\n",
     "positive_sta = convert_cate_num(outfit_cate_num)\n",
     "comp_sta = convert_cate_num(comp_cate_num)\n",
     "negative_sta = convert_cate_num(negative_cate_num)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 318,
   "metadata": {},
   "outputs": [],
   "source": [
    "positive = pd.DataFrame(positive_sta)\n",
    "positive['label'] = 1\n",
    "negative = pd.DataFrame(negative_sta)\n",
    "negative['label'] = 0\n",
    "train = pd.concat([positive, negative], axis=0)\n",
    "test = pd.DataFrame(comp_sta)\n",
    "test['label'] = comp_dataset.labels"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 319,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.6708847935630688"
      ]
     },
     "execution_count": 319,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
     "# A regressor is acceptable here: ROC-AUC only needs a monotone ranking\n",
     "# score, not calibrated class probabilities.\n",
     "xgb = xgboost.XGBRegressor()\n",
     "xgb.fit(train.drop('label', axis=1).values, train['label'].values)\n",
     "ret = xgb.predict(test.drop('label', axis=1).values)\n",
     "roc_auc_score(test.label, ret)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 320,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "array([0.7416862 , 0.5817155 , 0.3708316 , ..., 0.00269401, 0.5625059 ,\n",
       "       0.5931865 ], dtype=float32)"
      ]
     },
     "execution_count": 320,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ret"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
