{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "scrolled": true
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "override USE_NET <class 'src.pure_image_networks.VOTNetworks'>\n",
      "override DATASET_PROC_METHOD_TRAIN Rescale224\n",
      "override DATASET_PROC_METHOD_VAL Rescale224\n",
      "override VOT_MARGIN 0.2\n",
      "override VOT_SCORE_USE_CENTER True\n",
      "override GENERAL_METRICS True\n",
      "override USE_PRETRAINED_WORD_EMBEDDING False\n",
      "override OUTFIT_ITEM_PAD_NUM 8\n",
      "override OUTFIT_NAME_PAD_NUM 10\n",
      "override WORD_EMBED_SIZE 300\n",
      "override MAX_VOCAB_SIZE 300\n",
      "override IMAGE_EMBED_SIZE 300\n",
      "override EMBED_MARGIN 0.2\n",
      "override WEIGHT_OUTFIT_TRIPLET 1.0\n",
      "override NUM_EPOCH 70\n",
      "override LEARNING_RATE 0.0001\n",
      "override LEARNING_RATE_DECAY 0.95\n",
      "override BATCH_SIZE 2\n",
      "override SAVE_EVERY_STEPS 10000\n",
      "override SAVE_EVERY_EPOCHS 1\n",
      "override VAL_WHILE_TRAIN True\n",
      "override VAL_FASHION_COMP_FILE fashion_compatibility_small.txt\n",
      "override VAL_FITB_FILE fill_in_blank_test_small.json\n",
      "override VAL_BATCH_SIZE 8\n",
      "override VAL_EVERY_STEPS 1000\n",
      "override VAL_EVERY_EPOCHS 1\n",
      "override VAL_START_EPOCH 1\n",
      "override device cuda:0\n",
      "override TRAIN_DIR runs/src.conf.pure_image.vot2_gm/11-04 03:45:22\n",
      "override VAL_DIR runs/src.conf.pure_image.vot2_gm/11-04 03:45:22\n",
      "override MODEL_NAME src.conf.pure_image.vot2_gm\n"
     ]
    }
   ],
   "source": [
    "import torch\n",
    "import torch.utils.data\n",
    "from src.const import base_path\n",
    "import numpy as np\n",
    "import cv2\n",
    "from torchvision import transforms\n",
    "import matplotlib.pyplot as plt\n",
    "import pandas as pd\n",
    "from skimage import io, transform\n",
    "import skimage\n",
    "from src import const\n",
    "import json\n",
    "import os\n",
    "import types\n",
    "import nltk\n",
    "from src.utils import load_json, build_vocab, Vocab\n",
    "from src.dataset import PolyvoreDataset, SimpleImageDataset, CompatibilityBenchmarkDataset, FITBDataset\n",
    "from src.base_networks import *\n",
    "from src.networks import *\n",
    "from src.pure_image_networks import *\n",
    "from torch import nn\n",
    "import torchvision\n",
    "from torch.nn import functional as F\n",
    "from src.utils import merge_const\n",
    "\n",
    "# Load the experiment config, then override a couple of knobs for local debugging.\n",
    "merge_const('src.conf.pure_image.vot2_gm')\n",
    "const.BATCH_SIZE = 2\n",
    "const.device = 'cpu'\n",
    "\n",
    "# Plain attribute holder so method bodies can be prototyped cell by cell\n",
    "# with `self.xxx = ...` before being moved into a class.\n",
    "self = types.SimpleNamespace()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Fashion Compatibility Test, Use File: /home/hzy/datasets/polyvore/fashion_compatibility_small.txt\n",
      "FITB Test, Use File: /home/hzy/datasets/polyvore/fill_in_blank_test_small.json\n",
      "dict_keys(['raw_images', 'images', 'image_mask', 'item_nums', 'word_ids', 'word_mask', 'word_lengths', 'word_detail_mask', 'word_embedding_divider'])\n"
     ]
    }
   ],
   "source": [
    "train_set = load_json(os.path.join(const.base_path, 'train_no_dup.json'))\n",
    "valid_set = load_json(os.path.join(const.base_path, 'valid_no_dup.json'))\n",
    "test_set = load_json(os.path.join(const.base_path, 'test_no_dup.json'))\n",
    "vocab = build_vocab(train_set)\n",
    "train_dataset = PolyvoreDataset(train_set, const.DATASET_PROC_METHOD_TRAIN, vocab)\n",
    "train_dataloader = torch.utils.data.DataLoader(train_dataset, batch_size=const.BATCH_SIZE, shuffle=True, num_workers=4)\n",
    "comp_dataset = CompatibilityBenchmarkDataset(const.DATASET_PROC_METHOD_VAL)\n",
    "fitb_dataset = FITBDataset(const.DATASET_PROC_METHOD_VAL)\n",
    "fitb_dataloader = torch.utils.data.DataLoader(fitb_dataset, batch_size=1, shuffle=False, num_workers=1)\n",
    "# next(iter(...)) replaces the Python-2-style .next(), which newer PyTorch\n",
    "# DataLoader iterators no longer provide.\n",
    "sample = next(iter(train_dataloader))\n",
    "print(sample.keys())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 45,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "class OutfitTripletLoss(object):\n",
    "    '''\n",
    "    Triplet loss built from a pair of outfits.\n",
    "\n",
    "    Inputs:\n",
    "        image_embedding: [2, outfit_item_pad, embed_size]\n",
    "        image_mask: [2, outfit_item_pad]\n",
    "    Output:\n",
    "        scalar loss: item pairs inside outfit 0 should score at least\n",
    "        VOT_MARGIN higher than cross-outfit pairs (outfit 0 vs outfit 1).\n",
    "    '''\n",
    "\n",
    "    def __call__(self, image_embedding, image_mask, metrics=None):\n",
    "        # `metrics` is an optional learned similarity head; like the inner\n",
    "        # product, a larger value must mean more similar.\n",
    "        assert image_embedding.shape[0] == 2\n",
    "        if metrics is None:\n",
    "            embedding1 = image_embedding[0]\n",
    "            embedding2 = image_embedding[1]\n",
    "            valid1 = image_mask[0, :].unsqueeze(dim=1)\n",
    "            valid2 = image_mask[1, :].unsqueeze(dim=1)\n",
    "            # scores_same[i, j]: item i vs item j, both from outfit 0 (positive pair)\n",
    "            scores_same = embedding1.matmul(embedding1.transpose(0, 1))\n",
    "            # scores_diff[i, j]: item i of outfit 0 vs item j of outfit 1 (negative pair)\n",
    "            scores_diff = embedding1.matmul(embedding2.transpose(0, 1))\n",
    "            scores = F.relu(scores_diff - scores_same + const.VOT_MARGIN)\n",
    "            valid = valid1.matmul(valid1.transpose(0, 1))\n",
    "            valid = valid * (valid1.matmul(valid2.transpose(0, 1)))\n",
    "            valid = valid - torch.diag(valid.diag())  # i == j is not a valid triplet\n",
    "            denom = valid.sum()\n",
    "            if denom.item() == 0:  # no valid triplet: return zero but keep the graph\n",
    "                return scores.sum() * 0.\n",
    "            loss = (scores * valid).sum() / denom\n",
    "            return loss\n",
    "        else:\n",
    "            embedding1 = image_embedding[0]\n",
    "            embedding2 = image_embedding[1]\n",
    "            valid1 = image_mask[0, :].detach().cpu().numpy().tolist()\n",
    "            valid2 = image_mask[1, :].detach().cpu().numpy().tolist()\n",
    "\n",
    "            loss = []\n",
    "            valid_cnt = 0.\n",
    "            for i in range(const.OUTFIT_ITEM_PAD_NUM):\n",
    "                for j in range(const.OUTFIT_ITEM_PAD_NUM):\n",
    "                    # Bug fix: the negative item is outfit 1's item j, so its\n",
    "                    # mask is valid2[j] (was valid2[i]); the positive item is\n",
    "                    # outfit 0's item j, so valid1[j] must hold too (matching\n",
    "                    # the mask product used by the dense branch above).\n",
    "                    if i == j or int(valid1[i]) == 0 or int(valid1[j]) == 0 or int(valid2[j]) == 0:\n",
    "                        continue\n",
    "                    loss.append(F.relu(\n",
    "                        metrics(embedding1[i] * embedding2[j]) - metrics(embedding1[i] * embedding1[j]) + const.VOT_MARGIN,\n",
    "                    ))\n",
    "                    valid_cnt += 1.\n",
    "            if valid_cnt == 0.:  # no valid triplet: return zero but keep the graph\n",
    "                return image_embedding.sum() * 0.\n",
    "            loss = sum(loss)\n",
    "            loss = loss / valid_cnt\n",
    "            return loss\n",
    "\n",
    "class VOTNetworks(ModuleWithAttr):\n",
    "    '''\n",
    "    Image-only network: scores outfit compatibility from visual\n",
    "    embeddings alone (no word/text information).\n",
    "    '''\n",
    "    def __init__(self, pretrained_embeddings=None):\n",
    "        super(VOTNetworks, self).__init__()\n",
    "        self.image_embedding = ImageEmbedding()\n",
    "        self.outfit_triplet_loss = OutfitTripletLoss()\n",
    "        if const.VOT_SCORE_USE_CENTER is False:\n",
    "            self.score_loss = NewOutfitIntraItemLoss()\n",
    "        else:\n",
    "            self.score_loss = NewIntraOutfitCenterLoss()\n",
    "        # NOTE: like the inner product, a larger metric value must mean more similar!\n",
    "        if const.GENERAL_METRICS is True:\n",
    "            print('USE GENERAL METRICS')\n",
    "            self.metric_branch = nn.Linear(const.IMAGE_EMBED_SIZE, 1, bias=False)\n",
    "            # initialize as having an even weighting across all dimensions\n",
    "        #     weight = torch.zeros(1,const.IMAGE_EMBED_SIZE)/float(const.IMAGE_EMBED_SIZE)\n",
    "            weight = torch.ones(1,const.IMAGE_EMBED_SIZE) # all zeros seemed unreasonable; changed to ones for now\n",
    "            self.metric_branch.weight = nn.Parameter(weight)\n",
    "        else:\n",
    "            self.metric_branch = None\n",
    "\n",
    "    def forward(self, sample):\n",
    "        '''Embed the outfit images; returns the embeddings plus the item mask.'''\n",
    "        image_embedding = self.image_embedding(sample)\n",
    "        image_mask = sample['image_mask']\n",
    "        output = {\n",
    "            'image_embedding': image_embedding,\n",
    "            'image_mask': image_mask,\n",
    "        }\n",
    "        return output\n",
    "\n",
    "    def cal_loss(self, sample, output):\n",
    "        '''Weighted sum of the configured losses for one training step.'''\n",
    "        image_embedding = output['image_embedding']\n",
    "        image_mask = output['image_mask']\n",
    "        # L2-normalize the embeddings, then apply the optional scale factor\n",
    "        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)\n",
    "        if hasattr(const, 'SCALE_FACTOR'):\n",
    "            image_embedding = const.SCALE_FACTOR * image_embedding\n",
    "\n",
    "        outfit_triplet_loss = self.outfit_triplet_loss(image_embedding, image_mask, self.metric_branch)\n",
    "\n",
    "        # [name, weight, value] triples; kept as a list so callers can log each term\n",
    "        loss_structure = [\n",
    "            ['outfit_triplet_loss', const.WEIGHT_OUTFIT_TRIPLET, outfit_triplet_loss],\n",
    "        ]\n",
    "\n",
    "        all_loss = sum([loss_pair[1] * loss_pair[2] for loss_pair in loss_structure])\n",
    "\n",
    "        return {\n",
    "            'structure': loss_structure,\n",
    "            'all': all_loss,\n",
    "        }\n",
    "\n",
    "    def score_compatibility(self, output):  # if this method exists, it is used for scoring\n",
    "        image_embedding = output['image_embedding']\n",
    "        image_mask = output['image_mask']\n",
    "        # L2-normalize the embeddings, then apply the optional scale factor\n",
    "        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)\n",
    "        # masked mean of valid item embeddings -> one center per outfit\n",
    "        centers = (image_mask.unsqueeze(2).float() * image_embedding).sum(dim=1) / image_mask.sum(dim=1, keepdim=True).float()\n",
    "        # normalize the centers\n",
    "        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)\n",
    "        # Scale Factor?\n",
    "        if hasattr(const, 'SCALE_FACTOR'):\n",
    "            centers = const.SCALE_FACTOR * centers\n",
    "            image_embedding = const.SCALE_FACTOR * image_embedding\n",
    "        if const.VOT_SCORE_USE_CENTER is False:\n",
    "            loss = self.score_loss(image_embedding, image_mask)\n",
    "            return loss['every']\n",
    "        else:\n",
    "            loss = self.score_loss(image_embedding, centers, image_mask)\n",
    "            return loss['every']\n",
    "    \n",
    "    def fitb_ans(self, sample):\n",
    "        '''Pick the fill-in-the-blank answer index for one FITB sample.'''\n",
    "        image_embedding = self.image_embedding({'images': sample['images']})\n",
    "        answer_embedding = self.image_embedding({'images': sample['answer_images']})\n",
    "        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)\n",
    "        answer_embedding = answer_embedding / (torch.norm(answer_embedding, p=2, dim=2, keepdim=True) + 1e-9)\n",
    "        # NOTE(review): mean over dim=0 — the exploratory cell later in this\n",
    "        # notebook uses mean(dim=1); confirm which axis is intended.\n",
    "        centers = image_embedding.mean(dim=0, keepdim=True)\n",
    "        centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)\n",
    "        # Scale Factor?\n",
    "        if hasattr(const, 'SCALE_FACTOR'):\n",
    "            centers = const.SCALE_FACTOR * centers\n",
    "            image_embedding = const.SCALE_FACTOR * image_embedding\n",
    "            answer_embedding = const.SCALE_FACTOR * answer_embedding\n",
    "        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])\n",
    "        answer_embedding = answer_embedding.reshape(-1, image_embedding.shape[-1])\n",
    "        if const.GENERAL_METRICS is False:\n",
    "            if const.VOT_FITB_USE_CENTER is True:\n",
    "                scores = centers.matmul(answer_embedding.transpose(0, 1))\n",
    "                # NOTE(review): argmin selects the LEAST similar answer while the\n",
    "                # branch below uses argmax — confirm this is intentional.\n",
    "                ans_index = torch.argmin(scores).item()\n",
    "            else:\n",
    "                scores = image_embedding.matmul(answer_embedding.transpose(0, 1))\n",
    "                scores = scores.mean(dim=0)  # larger inner product = closer\n",
    "                ans_index = torch.argmax(scores).item()\n",
    "        else:        \n",
    "            ans_scores = []\n",
    "            for i in range(answer_embedding.shape[0]):\n",
    "                score = 0\n",
    "                for j in range(image_embedding.shape[0]):\n",
    "                    # the metric behaves like the inner product: larger = more similar\n",
    "                    score += self.metric_branch(answer_embedding[i] * image_embedding[j]).item()\n",
    "                ans_scores.append(score)\n",
    "            ans_index = np.argmax(ans_scores)\n",
    "        return ans_index\n",
    "    \n",
    "    def get_embedding(self, sample):\n",
    "        '''Embed a single item; returns a normalized (1, embed_size) tensor.'''\n",
    "        # image: 3 x 224 x 224\n",
    "        sample['images'] = sample['images'].unsqueeze(dim=0)\n",
    "        image_embedding = self.image_embedding(sample)\n",
    "        image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)\n",
    "        if hasattr(const, 'SCALE_FACTOR'):\n",
    "            image_embedding = const.SCALE_FACTOR * image_embedding\n",
    "        image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])\n",
    "        return image_embedding"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 46,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "USE GENERAL METRICS\n"
     ]
    }
   ],
   "source": [
    "# Instantiate the image-only network (prints 'USE GENERAL METRICS' when enabled).\n",
    "net = VOTNetworks()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 47,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke test: one forward pass on the sampled training batch.\n",
    "output = net(sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 68,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Grab one FITB sample; next(iter(...)) replaces the removed .next() method.\n",
    "sample = next(iter(fitb_dataloader))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 70,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Attach an ImageEmbedding to the scratch 'self' holder for cell-by-cell prototyping.\n",
    "self.image_embedding = ImageEmbedding()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 78,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch replication of VOTNetworks.fitb_ans on the FITB sample above.\n",
    "image_embedding = self.image_embedding({'images': sample['images']})\n",
    "answer_embedding = self.image_embedding({'images': sample['answer_images']})\n",
    "image_embedding = image_embedding / (torch.norm(image_embedding, p=2, dim=2, keepdim=True) + 1e-9)\n",
    "answer_embedding = answer_embedding / (torch.norm(answer_embedding, p=2, dim=2, keepdim=True) + 1e-9)\n",
    "# NOTE(review): fitb_ans uses mean(dim=0, keepdim=True) — confirm which axis is intended.\n",
    "centers = image_embedding.mean(dim=1)\n",
    "centers = centers / (torch.norm(centers, p=2, dim=1, keepdim=True) + 1e-9)\n",
    "# Scale Factor?\n",
    "if hasattr(const, 'SCALE_FACTOR'):\n",
    "    centers = const.SCALE_FACTOR * centers\n",
    "    image_embedding = const.SCALE_FACTOR * image_embedding\n",
    "    answer_embedding = const.SCALE_FACTOR * answer_embedding\n",
    "image_embedding = image_embedding.reshape(-1, image_embedding.shape[-1])\n",
    "answer_embedding = answer_embedding.reshape(-1, image_embedding.shape[-1])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 82,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 83,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 84,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0"
      ]
     },
     "execution_count": 84,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "ans_index"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 85,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "tensor([[ 0.8455,  0.8122,  0.8112,  0.8148]])"
      ]
     },
     "execution_count": 85,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "scores"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
