{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "from src.dataset import DeepFashionCAPDataset\n",
    "from src.const import base_path\n",
    "import matplotlib.pyplot as plt\n",
    "from src.networks import VGG16BaselineNet\n",
    "import pandas as pd\n",
    "import torch\n",
    "import torch.utils.data\n",
    "from torch import nn\n",
    "import numpy as np\n",
    "from torch.nn import functional as F\n",
    "from src.const import base_path\n",
    "from src import const\n",
    "from src.utils import topk_accuracy, metrics_multilabel\n",
    "from tensorboardX import SummaryWriter\n",
    "from src import dataset\n",
    "%matplotlib inline"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 124,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "'05-24 18:05:24'"
      ]
     },
     "execution_count": 124,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "import time\n",
    "# %M is minutes; the previous format used %m (month) in the minutes slot,\n",
    "# which printed e.g. '05-24 18:05:24' with the month repeated as minutes.\n",
    "time.strftime('%m-%d %H:%M:%S', time.localtime())\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "df = pd.read_csv(base_path + 'info.csv')\n",
    "test_df = df[df['evaluation_status'] == 'test']\n",
    "test_dataset = DeepFashionCAPDataset(test_df, mode='CENTER')\n",
    "test_dataloader = torch.utils.data.DataLoader(test_dataset, batch_size=16, shuffle=True, num_workers=1)\n",
    "i = iter(test_dataloader)\n",
    "# Python 3 iterators have no .next() method; use the next() builtin instead.\n",
    "samples = [next(i) for _ in range(10)]\n",
    "net = VGG16BaselineNet().to(device)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# map_location keeps the checkpoint loadable on CPU-only machines even if\n",
    "# it was saved from a CUDA run.\n",
    "net.load_state_dict(torch.load('models/vgg16.pkl', map_location=device))\n",
    "_ = net.eval()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Move every tensor of the first cached batch onto the device (in place),\n",
    "# then run a single forward pass.\n",
    "sample = samples[0]\n",
    "for key, value in sample.items():\n",
    "    sample[key] = value.to(device)\n",
    "output = net(sample)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 106,
   "metadata": {},
   "outputs": [],
   "source": [
    "import src.settings.no_sigmoid as conf"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 114,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "import importlib"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 117,
   "metadata": {},
   "outputs": [],
   "source": [
    "conf = importlib.import_module('src.settings.no_sigmoid')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 110,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# __import__ requires the module name; with a non-empty fromlist it returns\n",
    "# the submodule itself rather than the top-level 'src' package.\n",
    "new_conf = __import__('src.settings.no_sigmoid', fromlist=[''])"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 113,
   "metadata": {},
   "outputs": [],
   "source": [
    "__import__?"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 109,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "base_path\n",
      "NUM_EPOCH\n",
      "LEARNING_RATE\n",
      "BATCH_SIZE\n",
      "WEIGHT_ATTR_NEG\n",
      "WEIGHT_ATTR_POS\n",
      "WEIGHT_LANDMARK_VIS_NEG\n",
      "WEIGHT_LANDMARK_VIS_POS\n",
      "WEIGHT_LOSS_CATEGORY\n",
      "WEIGHT_LOSS_ATTR\n",
      "WEIGHT_LOSS_LM_VIS\n",
      "WEIGHT_LOSS_LM_POS\n",
      "VAL_CATEGORY_TOP_N\n",
      "VAL_LM_RELATIVE_DIS\n"
     ]
    }
   ],
   "source": [
    "# List the public (non-underscore) names exposed by the config module.\n",
    "for name in vars(conf):\n",
    "    if not name.startswith('_'):\n",
    "        print(name)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "def metrics_multilabel(output, target):\n",
    "    pred = torch.argmax(output, dim=1).cpu().numpy()\n",
    "    target = target.cpu().numpy()\n",
    "    # 去除标记全部为0的\n",
    "    pred = pred[target.sum(axis=1) != 0]\n",
    "    target = target[target.sum(axis=1) != 0]\n",
    "    tp = ((pred == 1) & (target == 1)).sum(axis=1)\n",
    "    fp = ((pred == 1) & (target == 0)).sum(axis=1)\n",
    "    tn = ((pred == 0) & (target == 0)).sum(axis=1)\n",
    "    fn = ((pred == 0) & (target == 1)).sum(axis=1)\n",
    "    # tp, ground truth positive, predicted positive\n",
    "    return tp.sum(), (tp + fn).sum(), (tp + fp).sum()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 22,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Per-landmark Euclidean distance between predicted and ground-truth\n",
    "# normalized positions: shape (batch_size, 8).\n",
    "delta = output['lm_pos_output'] - sample['landmark_pos_normalized']\n",
    "lm_dist = torch.sqrt(torch.sum(delta ** 2, dim=2)).detach().cpu().numpy()\n",
    "gt_lm_vis = sample['landmark_vis'].cpu().numpy()\n",
    "pred_lm_vis = torch.argmax(output['lm_vis_output'], dim=1).cpu().numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 27,
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 28,
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "71"
      ]
     },
     "execution_count": 28,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "vis_pred_p"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "attr_eval = []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "attr_eval.append(metrics_multilabel(output['attr_output'], sample['attr']))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "# Micro-averaged recall / precision over all accumulated batches.\n",
    "total_tp = float(sum(x[0] for x in attr_eval))\n",
    "attr_recall = total_tp / sum(x[1] for x in attr_eval)\n",
    "attr_precision = total_tp / sum(x[2] for x in attr_eval)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 57,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class Evaluator(object):\n",
    "    \n",
    "    def __init__(self, category_topk=(1, 3, 5), lm_relative_threshold=0.1):\n",
    "        self.reset()\n",
    "        self.category_topk = category_topk\n",
    "        self.lm_relative_threshold = lm_relative_threshold\n",
    "\n",
    "    def reset(self):\n",
    "        self.category_accuracy = []\n",
    "        self.attr_tp = 0\n",
    "        self.attr_gt_p = 0\n",
    "        self.attr_pred_p = 0\n",
    "        self.lm_vis_tp = 0\n",
    "        self.lm_vis_gt_p = 0\n",
    "        self.lm_vis_pred_p = 0\n",
    "        self.lm_pos_tp = 0\n",
    "        self.lm_pos_gt_p = 0\n",
    "        self.lm_pos_pred_p = 0\n",
    "    \n",
    "    def category_topk_accuracy(self, output, target):\n",
    "        with torch.no_grad():\n",
    "            maxk = max(self.category_topk)\n",
    "            batch_size = target.size(0)\n",
    "\n",
    "            _, pred = output.topk(maxk, 1, True, True)\n",
    "            pred = pred.t()\n",
    "            correct = pred.eq(target.view(1, -1).expand_as(pred))\n",
    "\n",
    "            res = []\n",
    "            for k in self.category_topk:\n",
    "                correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n",
    "                res.append(correct_k.mul_(100 / batch_size))\n",
    "            for i in range(len(res)):\n",
    "                res[i] = res[i].cpu().numpy()[0] / 100\n",
    "            \n",
    "            self.category_accuracy.append(res)\n",
    "    \n",
    "    def attr_count(self, output, target):\n",
    "        pred = torch.argmax(output, dim=1).cpu().numpy()\n",
    "        target = target.cpu().numpy()\n",
    "        # 去除标记全部为0的\n",
    "        pred = pred[target.sum(axis=1) != 0]\n",
    "        target = target[target.sum(axis=1) != 0]\n",
    "        tp = ((pred == 1) & (target == 1)).sum(axis=1)\n",
    "        fp = ((pred == 1) & (target == 0)).sum(axis=1)\n",
    "        tn = ((pred == 0) & (target == 0)).sum(axis=1)\n",
    "        fn = ((pred == 0) & (target == 1)).sum(axis=1)\n",
    "        # tp, ground truth positive, predicted positive\n",
    "        self.attr_tp += tp.sum()\n",
    "        self.attr_gt_p += (tp + fn).sum()\n",
    "        self.attr_pred_p += (tp + fp).sum()\n",
    "    \n",
    "    def landmark_count(self, output, sample):\n",
    "        lm_dist = torch.sqrt(\n",
    "            torch.sum(torch.pow((output['lm_pos_output'] - sample['landmark_pos_normalized']), 2), dim=2)\n",
    "        ).cpu().detach().numpy()\n",
    "        gt_lm_vis = sample['landmark_vis'].cpu().numpy()\n",
    "        pred_lm_vis = torch.argmax(output['lm_vis_output'], dim=1).cpu().numpy()\n",
    "    \n",
    "        self.lm_vis_tp += ((gt_lm_vis == 1) & (pred_lm_vis == 1)).sum()\n",
    "        self.lm_vis_gt_p += (gt_lm_vis == 1).sum()\n",
    "        self.lm_vis_pred_p += (pred_lm_vis == 1).sum()\n",
    "        self.lm_pos_tp += ((gt_lm_vis == 1) & (pred_lm_vis == 1) & (lm_dist < self.lm_relative_threshold)).sum()\n",
    "        self.lm_pos_gt_p += (gt_lm_vis == 1).sum()\n",
    "        self.lm_pos_pred_p += (pred_lm_vis == 1).sum()\n",
    "\n",
    "    \n",
    "    def add(self, output, sample):\n",
    "        self.category_topk_accuracy(output['category_output'], sample['category_label'])\n",
    "        self.attr_count(output['attr_output'], sample['attr'])\n",
    "        self.landmark_count(output, sample)\n",
    "        \n",
    "    \n",
    "    def evaluate(self):\n",
    "        category_accuracy = np.array(self.category_accuracy).mean(axis=0)\n",
    "        category_accuracy_topk  = {}\n",
    "        for i, top_n in enumerate(const.VAL_CATEGORY_TOP_N):\n",
    "            category_accuracy_topk[top_n] = category_accuracy[i]\n",
    "        attr_recall = float(self.attr_tp) / self.attr_gt_p\n",
    "        attr_precision = float(self.attr_tp) / self.attr_pred_p\n",
    "        lm_vis_recall = float(self.lm_vis_tp) / self.lm_vis_gt_p\n",
    "        lm_vis_precision = float(self.lm_vis_tp) / self.lm_vis_pred_p\n",
    "        lm_pos_recall = float(self.lm_pos_tp) / self.lm_pos_gt_p\n",
    "        lm_pos_precision = float(self.lm_pos_tp) / self.lm_pos_pred_p\n",
    "        \n",
    "        return {\n",
    "            'category_accuracy_topk': category_accuracy_topk,\n",
    "            'attr_recall': attr_recall,\n",
    "            'attr_precision': attr_precision,\n",
    "            'lm_vis_recall': lm_vis_recall,\n",
    "            'lm_vis_precision': lm_vis_precision,\n",
    "            'lm_pos_recall': lm_pos_recall,\n",
    "            'lm_pos_precision': lm_pos_precision,\n",
    "        }\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 97,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": [
    "class TestModule(nn.Module):\n",
    "    \n",
    "    def __init__(self):\n",
    "        super(TestModule, self).__init__()\n",
    "        self.step = 100"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "ename": "TypeError",
     "evalue": "tensor is not a torch image.",
     "output_type": "error",
     "traceback": [
      "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
      "\u001b[0;31mTypeError\u001b[0m                                 Traceback (most recent call last)",
      "\u001b[0;32m<ipython-input-11-5c3d0842e319>\u001b[0m in \u001b[0;36m<module>\u001b[0;34m()\u001b[0m\n\u001b[1;32m      1\u001b[0m image = np.array(test_dataset.to_pil(\n\u001b[1;32m      2\u001b[0m     test_dataset.unnormalize(\n\u001b[0;32m----> 3\u001b[0;31m         \u001b[0msample\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0;34m'image'\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0msqueeze\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mdim\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;36m0\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mcpu\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m      4\u001b[0m     )\n\u001b[1;32m      5\u001b[0m ))\n",
      "\u001b[0;32m~/anaconda2/envs/py3/lib/python3.6/site-packages/torchvision/transforms/transforms.py\u001b[0m in \u001b[0;36m__call__\u001b[0;34m(self, tensor)\u001b[0m\n\u001b[1;32m    141\u001b[0m             \u001b[0mTensor\u001b[0m\u001b[0;34m:\u001b[0m \u001b[0mNormalized\u001b[0m \u001b[0mTensor\u001b[0m \u001b[0mimage\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    142\u001b[0m         \"\"\"\n\u001b[0;32m--> 143\u001b[0;31m         \u001b[0;32mreturn\u001b[0m \u001b[0mF\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mnormalize\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mmean\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mself\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0mstd\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    144\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    145\u001b[0m     \u001b[0;32mdef\u001b[0m \u001b[0m__repr__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mself\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;32m~/anaconda2/envs/py3/lib/python3.6/site-packages/torchvision/transforms/functional.py\u001b[0m in \u001b[0;36mnormalize\u001b[0;34m(tensor, mean, std)\u001b[0m\n\u001b[1;32m    163\u001b[0m     \"\"\"\n\u001b[1;32m    164\u001b[0m     \u001b[0;32mif\u001b[0m \u001b[0;32mnot\u001b[0m \u001b[0m_is_tensor_image\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 165\u001b[0;31m         \u001b[0;32mraise\u001b[0m \u001b[0mTypeError\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m'tensor is not a torch image.'\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m    166\u001b[0m     \u001b[0;31m# TODO: make efficient\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m    167\u001b[0m     \u001b[0;32mfor\u001b[0m \u001b[0mt\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mm\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0ms\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mzip\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mtensor\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mmean\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mstd\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
      "\u001b[0;31mTypeError\u001b[0m: tensor is not a torch image."
     ]
    }
   ],
   "source": [
    "# Visualize ground-truth (green) vs predicted (red) landmarks for the\n",
    "# first image of the batch.\n",
    "# Fix: the dataloader batch holds 16 samples, so squeeze(dim=0) left the\n",
    "# batch dimension intact and unnormalize() raised 'tensor is not a torch\n",
    "# image' (see the recorded traceback); index [0] selects a single sample.\n",
    "image = np.array(test_dataset.to_pil(\n",
    "    test_dataset.unnormalize(\n",
    "        sample['image'][0].cpu()\n",
    "    )\n",
    "))\n",
    "\n",
    "h, w = image.shape[:2]\n",
    "\n",
    "gt_lm_pos = sample['landmark_pos'][0].cpu().numpy()\n",
    "gt_lm_vis = sample['landmark_vis'][0].cpu().numpy()\n",
    "pred_lm_pos = output['lm_pos_output'][0].cpu().detach().numpy() * [w, h]\n",
    "pred_lm_vis = torch.argmax(output['lm_vis_output'][0], dim=0).cpu().detach().numpy()\n",
    "\n",
    "gt_category = sample['category_label'][0].item()\n",
    "pred_category = torch.argmax(output['category_output'][0], dim=0).item()\n",
    "\n",
    "plt.figure(dpi=200)\n",
    "plt.imshow(image)\n",
    "for i, vis in enumerate(gt_lm_vis):\n",
    "    if vis == 1:\n",
    "        plt.scatter([gt_lm_pos[i, 0]], [gt_lm_pos[i, 1]], s=20, marker='.', c='g')\n",
    "        plt.text(gt_lm_pos[i, 0], gt_lm_pos[i, 1], i, color='g', size=7)\n",
    "for i, vis in enumerate(pred_lm_vis):\n",
    "    if vis == 1:\n",
    "        plt.scatter([pred_lm_pos[i, 0]], [pred_lm_pos[i, 1]], s=20, marker='.', c='r')\n",
    "        plt.text(pred_lm_pos[i, 0], pred_lm_pos[i, 1], i, color='r', size=7)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "collapsed": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.6.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
