{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Global configuration: input shape, batch size, normalization stats, device.\n",
    "import numpy as np\n",
    "import mxnet as mx\n",
    "from data_loader import get_iterators\n",
    "from utils import *\n",
    "data_shape = (3,512,512)\n",
    "batch_size = 4\n",
    "std = np.array([58.395, 57.12, 57.375]) # MXNet default per-channel std, not computed from this dataset\n",
    "rgb_mean = np.array([130.063048, 129.967301, 124.410760]) # likewise: default mean, not dataset-specific\n",
    "ctx = mx.gpu(0)\n",
    "resize = data_shape[1:]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "rec_prefix = \"data/rec/img_\" + str(resize[0]) + \"_\" + str(resize[1])\n",
    "train_data, valid_data, class_names, num_class = get_iterators(rec_prefix, data_shape, batch_size) # iterators apply data augmentation\n",
    "\n",
    "train_data.reset()\n",
    "batch = train_data.next()\n",
    "images = batch.data[0][:]\n",
    "labels = batch.label[0][:]\n",
    "\n",
    "# Visual sanity check: draw one augmented batch with its ground-truth boxes.\n",
    "show_images(images.asnumpy(), labels.asnumpy(), rgb_mean, std, show_text=True, fontsize=6, MN=(2, 4))\n",
    "# show_9_images(images.asnumpy(), labels, rgb_mean)\n",
    "print (labels.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from model import SSD, sizes_list, ratios_list\n",
    "# The backbone / model variant can be changed in model.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": false
   },
   "outputs": [],
   "source": [
    "import time\n",
    "from mxnet.ndarray.contrib import MultiBoxTarget, MultiBoxPrior\n",
    "from mxnet import ndarray as nd\n",
    "from mxnet import gluon\n",
    "# Smoke test: build a 1-class SSD on CPU and run one forward pass on the\n",
    "# previewed batch to check the shapes of anchors / box / class outputs.\n",
    "net = SSD(1,ctx=mx.cpu(), verbose=True, prefix=\"ssd_\")\n",
    "print (net)\n",
    "#net.initialize()\n",
    "tic = time.time()\n",
    "anchors, box_preds, cls_preds = net(batch.data[0])\n",
    "print (time.time() - tic)\n",
    "# MultiBoxTarget matches anchors against ground truth, producing box\n",
    "# regression targets, a mask selecting anchors that contribute to the box\n",
    "# loss, and a per-anchor class label.\n",
    "box_offset, box_mask, cls_labels = MultiBoxTarget(anchors, batch.label[0], cls_preds.transpose(axes=(0, 2, 1)))\n",
    "print (cls_labels.shape, cls_preds.shape, box_offset.shape, box_mask.shape)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "对于分类，我们使用的是关注损失\n",
    "$$L = -\\alpha * (1 - P)^{\\gamma} * log(P)$$\n",
    "关注损失会降低预测正确的损失，增大预测错误的损失。\n",
    "\n",
    "对于回归，使用Smooth-L1损失，这是由于L1在误差增大时损失增加较缓慢，增加计算稳定性。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "\n",
    "\"\"\"\n",
    "loss define\n",
    "\"\"\"\n",
    "class FocalLoss(gluon.loss.Loss):\n",
    "    \"\"\"Focal loss for classification: L = -alpha * (1 - p)^gamma * log(p).\n",
    "\n",
    "    Down-weights easy (confidently-correct) anchors so training focuses on\n",
    "    hard examples; `gama` (sic) is the focusing exponent gamma.\n",
    "    \"\"\"\n",
    "    def __init__(self, axis=-1, alpha=0.25, gama=2, batch_axis=0, **kwargs):\n",
    "        super(FocalLoss, self).__init__(None, batch_axis, **kwargs)\n",
    "        self.alpha = alpha\n",
    "        self.gama = gama\n",
    "        self.axis = axis\n",
    "        self.batch_axis = batch_axis\n",
    "        \n",
    "    def hybrid_forward(self, F, y, label):\n",
    "        # Convert raw class scores to probabilities.\n",
    "        y = F.softmax(y)\n",
    "        # Probability assigned to the true class of each anchor.\n",
    "        py = y.pick(label, axis=self.axis, keepdims=True)\n",
    "        loss = - (self.alpha * ((1 - py) ** self.gama)) * py.log()\n",
    "        # Mean over all axes except the batch axis -> one loss per sample.\n",
    "        return loss.mean(axis=self.batch_axis, exclude=True)\n",
    "    \n",
    "class SmoothL1Loss(gluon.loss.Loss):\n",
    "    \"\"\"Smooth-L1 (Huber-like) loss for box regression.\n",
    "\n",
    "    `mask` zeroes out anchors that were not matched to a ground-truth box\n",
    "    so they do not contribute to the regression loss.\n",
    "    \"\"\"\n",
    "    def __init__(self, batch_axis=0, **kwargs):\n",
    "        super(SmoothL1Loss, self).__init__(None, batch_axis, **kwargs)\n",
    "        self.batch_axis = batch_axis\n",
    "        \n",
    "    def hybrid_forward(self, F, y, label, mask):\n",
    "        loss = F.smooth_l1((y - label) * mask, scalar=1.0)\n",
    "        return loss.mean(axis=self.batch_axis, exclude=True)\n",
    "    \n",
    "cls_loss = FocalLoss()\n",
    "#cls_loss = gluon.loss.SoftmaxCrossEntropyLoss()\n",
    "box_loss = SmoothL1Loss()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "这里使用AP分数作为分类评价的标准。\n",
    "由于在模型检测问题中，反例占据了绝大多数，即使把所有的边框全部预测为反例已然会具有不错的精度。因此不能直接使用分类精度作为评价标准。\n",
    "AP曲线考虑在预测为正例的标签中真正为正例的概率（查准率，precision）以及在全部正例中预测为正例的概率（召回率，recall），更能反映模型的正确性。\n",
    "\n",
    "使用MAE（平均绝对值误差）作为回归评价的标准。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\"\"\"\n",
    "train net\n",
    "\"\"\"\n",
    "from mxnet import metric\n",
    "from mxnet import autograd\n",
    "from mxnet.ndarray.contrib import MultiBoxDetection\n",
    "import numpy as np\n",
    "# Fix the label layout: up to 3 objects per image, 5 values per object.\n",
    "train_data.reshape(label_shape=(3, 5))\n",
    "valid_data.reshape(label_shape=(3,5))\n",
    "\n",
    "def evaluate_acc(data_iter, ctx):\n",
    "    \"\"\"Run `net` over `data_iter` and return (AP, box MAE metric).\n",
    "\n",
    "    AP is computed by `evaluate_MAP` (from utils) over all detections of\n",
    "    the epoch; the MAE metric measures box-offset regression error on\n",
    "    matched (masked) anchors only.\n",
    "    \"\"\"\n",
    "    data_iter.reset()\n",
    "    box_metric = metric.MAE()\n",
    "    outs, labels = None, None\n",
    "    for i, batch in enumerate(data_iter):\n",
    "        data = batch.data[0].as_in_context(ctx)\n",
    "        label = batch.label[0].as_in_context(ctx)\n",
    "        anchors, box_preds, cls_preds = net(data)\n",
    "        box_offset, box_mask, cls_labels = MultiBoxTarget(anchors, label, cls_preds.transpose(axes=(0, 2, 1)),\n",
    "                                                                  negative_mining_ratio=3.0)#, overlap_threshold=0.75)\n",
    "        box_metric.update([box_offset], [box_preds * box_mask])\n",
    "        \n",
    "        cls_probs = nd.SoftmaxActivation(cls_preds.transpose((0, 2, 1)), mode='channel')\n",
    "        out = MultiBoxDetection(cls_probs, box_preds, anchors, force_suppress=True, clip=False, nms_threshold=0.45)\n",
    "        # Accumulate the whole epoch's detections and labels for MAP.\n",
    "        if outs is None:\n",
    "            outs = out\n",
    "            labels = label\n",
    "        else:\n",
    "            outs = nd.concat(outs, out, dim=0)\n",
    "            labels = nd.concat(labels, label, dim=0)\n",
    "    AP = evaluate_MAP(outs, labels)\n",
    "    return AP, box_metric\n",
    "\n",
    "# Per-epoch history, consumed by the plotting cell below.\n",
    "info = {\"train_ap\": [], \"valid_ap\": [], \"loss\": []}\n",
    "def train(net, start_epoch, end_epoch, trainer=None):\n",
    "    \"\"\"Train `net` for epochs [start_epoch, end_epoch).\n",
    "\n",
    "    Creates an SGD trainer (lr=0.5, wd=5e-4) if none is given; halves the\n",
    "    learning rate at epochs 100/150/180; appends per-epoch train/valid AP\n",
    "    and [cls_loss, box_loss] sums to the global `info` dict.\n",
    "    \"\"\"\n",
    "    if trainer is None:\n",
    "        trainer = gluon.Trainer(net.collect_params(), 'sgd', {'learning_rate': 0.5, 'wd': 5e-4})\n",
    "    box_metric = metric.MAE()\n",
    "    \n",
    "    for e in range(start_epoch, end_epoch):\n",
    "        train_data.reset()\n",
    "        box_metric.reset()\n",
    "        tic = time.time()\n",
    "        _loss = [0,0]\n",
    "        # Step learning-rate schedule: halve at fixed epochs.\n",
    "        if e == 100 or e == 150 or e==180:\n",
    "#         if e == 150 or e==180:\n",
    "            trainer.set_learning_rate(trainer.learning_rate * 0.5)\n",
    "            \n",
    "        outs, labels = None, None\n",
    "        for i, batch in enumerate(train_data):\n",
    "            data = batch.data[0].as_in_context(ctx)\n",
    "            label = batch.label[0].as_in_context(ctx)\n",
    "            \n",
    "            with autograd.record():\n",
    "                anchors, box_preds, cls_preds = net(data)\n",
    "#                 print(anchors, box_preds, cls_preds)\n",
    "                # negative_mining_ratio=3.0: hard negatives participate in\n",
    "                # the loss, at up to 3x the number of positive anchors.\n",
    "                box_offset, box_mask, cls_labels = MultiBoxTarget(anchors, label, cls_preds.transpose(axes=(0, 2, 1)),\n",
    "                                                                  negative_mining_ratio=3.0)#, overlap_threshold=0.75)\n",
    "                \n",
    "                loss1 = cls_loss(cls_preds, cls_labels)\n",
    "                loss2 = box_loss(box_preds, box_offset, box_mask)\n",
    "                loss = loss1 + loss2\n",
    "#                 print(loss)\n",
    "            loss.backward()\n",
    "            trainer.step(data.shape[0])\n",
    "            _loss[0] += nd.mean(loss1).asscalar()\n",
    "            _loss[1] += nd.mean(loss2).asscalar()\n",
    "            \n",
    "            # Decode detections of this batch for the training-set MAP.\n",
    "            cls_probs = nd.SoftmaxActivation(cls_preds.transpose((0, 2, 1)), mode='channel')\n",
    "            out = MultiBoxDetection(cls_probs, box_preds, anchors, force_suppress=True, clip=False, nms_threshold=0.45)\n",
    "            if outs is None:\n",
    "                outs = out\n",
    "                labels = label\n",
    "            else:\n",
    "                outs = nd.concat(outs, out, dim=0)\n",
    "                labels = nd.concat(labels, label, dim=0)\n",
    "            \n",
    "            box_metric.update([box_offset], [box_preds * box_mask])\n",
    "        \n",
    "        train_AP = evaluate_MAP(outs, labels)\n",
    "        valid_AP, val_box_metric = evaluate_acc(valid_data, ctx)\n",
    "        info[\"train_ap\"].append(train_AP)\n",
    "        info[\"valid_ap\"].append(valid_AP)\n",
    "        info[\"loss\"].append(_loss)\n",
    "        \n",
    "        if (e+1) % 10 == 0:\n",
    "            print (\"epoch: %d time: %.2f loss: %.4f, %.4f lr: %.4f\" % (e, time.time() - tic,_loss[0], _loss[1], trainer.learning_rate))\n",
    "            print (\"train mae: %.4f AP: %.4f\" % (box_metric.get()[1], train_AP))\n",
    "            print (\"valid mae: %.4f AP: %.4f\" % (val_box_metric.get()[1], valid_AP))\n",
    "\n",
    "net = SSD(num_class, sizes_list, ratios_list, ctx, verbose=False, prefix=\"ssd_\")\n",
    "# net.collect_params().initialize(ctx=ctx, force_reinit=True)\n",
    "train(net, 0, 200)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "import matplotlib.pyplot as plt\n",
    "%matplotlib inline\n",
    "def plot(key):\n",
    "    \"\"\"Plot the per-epoch series stored under info[key].\"\"\"\n",
    "    plt.plot(range(len(info[key])), info[key], label=key)\n",
    "\n",
    "\n",
    "# Split the per-epoch [cls_loss, box_loss] pairs into two separate curves.\n",
    "info[\"loss\"] = np.array(info[\"loss\"])\n",
    "info[\"loss1\"] = info[\"loss\"][:, 0]\n",
    "info[\"loss2\"] = info[\"loss\"][:, 1]\n",
    "\n",
    "plt.figure(figsize=(12, 4))   # (w, h)\n",
    "plt.subplot(121)\n",
    "plot(\"train_ap\")\n",
    "plot(\"valid_ap\")\n",
    "plt.legend(loc=\"upper right\")\n",
    "plt.subplot(122)\n",
    "plot(\"loss1\")\n",
    "plot(\"loss2\")\n",
    "plt.legend(loc=\"upper right\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "def mkdir_if_not_exist(dirs):\n",
    "    \"\"\"Create directory `dirs` (including parents) if it does not exist.\"\"\"\n",
    "    # exist_ok=True avoids the check-then-create race of the previous\n",
    "    # os.path.exists() + os.makedirs() pair and is a no-op if dirs exists.\n",
    "    os.makedirs(dirs, exist_ok=True)\n",
    "mkdir_if_not_exist(\"models\")\n",
    "# Persist the trained weights for later evaluation / inference.\n",
    "net.save_params(\"models/papercup_vgg11bn29_512x512_data_sizes.param\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# 载入模型计算ROC"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the network and restore the trained weights from disk.\n",
    "# NOTE(review): num_class is hard-coded to 1 here; confirm it matches the\n",
    "# `num_class` used when the weights above were trained and saved.\n",
    "net = SSD(1, sizes_list, ratios_list, ctx=ctx, verbose=False, prefix=\"ssd_\")\n",
    "net.load_params(\"models/papercup_vgg11bn29_512x512_data_sizes.param\", ctx=ctx)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def draw_ROC(outs, labels, overlap_threshold=0.01, verbose=False, show=True, color='r', label_suffix=\"\"):\n",
    "    \"\"\"Plot precision-vs-recall and score-vs-recall curves for detections.\"\"\"\n",
    "    outs = outs.asnumpy()   # asnumpy() copies device data into a host numpy array\n",
    "    labels = labels.asnumpy()\n",
    "    scores, recall, prec = cal_scores_recall_prec(outs, labels, overlap_threshold)\n",
    "    plt.plot(recall,prec, '-', label=\"recall prec\"+label_suffix, color=color)\n",
    "    plt.plot(recall, scores, '--', label=\"recall score\"+label_suffix, color=color)\n",
    "    plt.legend(loc=\"upper right\")\n",
    "    if show:\n",
    "        plt.show()\n",
    "# Collect detections over the whole training set, then plot the curves.\n",
    "train_data.reset()\n",
    "outs = None\n",
    "labels = None\n",
    "for i, batch in enumerate(train_data):\n",
    "    data = batch.data[0].as_in_context(ctx)\n",
    "    label = batch.label[0].as_in_context(ctx)\n",
    "    anchors, box_preds, cls_preds = net(data)\n",
    "    cls_probs = nd.SoftmaxActivation(cls_preds.transpose((0, 2, 1)), mode=\"channel\")\n",
    "    out = MultiBoxDetection(cls_probs, box_preds, anchors, force_suppress=True, clip=False, nms_threshold=0.45)\n",
    "    if outs is None:\n",
    "        outs = out\n",
    "        labels = label\n",
    "    else:\n",
    "        outs = nd.concat(*[outs, out], dim=0)\n",
    "        labels = nd.concat(*[labels, label], dim=0)\n",
    "\n",
    "print (outs.shape)\n",
    "print (labels.shape)\n",
    "print (evaluate_MAP(outs, labels))\n",
    "draw_ROC(outs, labels, verbose=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def find_best_score_th(outs, labels, overlap_threshold=0.01):\n",
    "    \"\"\"Return the score threshold maximizing recall * precision.\n",
    "\n",
    "    `outs` / `labels` are numpy arrays (detections and ground truth);\n",
    "    `overlap_threshold` is forwarded to cal_scores_recall_prec (utils).\n",
    "    \"\"\"\n",
    "    scores, recall, prec = cal_scores_recall_prec(outs, labels, overlap_threshold)\n",
    "    max_area = 0\n",
    "    max_i = -1\n",
    "    for i in range(recall.shape[0]):\n",
    "        if max_area < recall[i] * prec[i]:\n",
    "            max_area = recall[i] * prec[i]\n",
    "            max_i = i\n",
    "    # BUG FIX: previously returned scores[i] (the last loop index) instead\n",
    "    # of scores[max_i] (the argmax computed above).\n",
    "    return scores[max_i]\n",
    "score_th = find_best_score_th(outs.asnumpy(), labels.asnumpy())\n",
    "print (score_th)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from mxnet import image\n",
    "# Plain (non-augmented) validation iterator over the packed .rec file.\n",
    "# mean=True / std=True apply MXNet's default per-channel normalization.\n",
    "valid_data = image.ImageDetIter(\n",
    "    batch_size=batch_size, \n",
    "    data_shape=data_shape,\n",
    "    path_imgrec=rec_prefix+'_val.rec',\n",
    "    path_imgidx=rec_prefix+'_val.idx',\n",
    "    shuffle=True,\n",
    "    mean=True,\n",
    "    std=True\n",
    ")\n",
    "# Pad/fix labels to 3 objects x 5 values per image (standard det layout;\n",
    "# presumably class + 4 box coordinates — matches the training iterators).\n",
    "valid_data.reshape(label_shape=(3,5))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from utils import show_det_results\n",
    "from mxnet.ndarray.contrib import MultiBoxDetection\n",
    "import matplotlib as mpl\n",
    "mpl.rcParams['figure.figsize'] = (6, 6)\n",
    "# BUG FIX: the old code only reset the iterator on exhaustion and never\n",
    "# re-fetched, leaving `batch` stale (or undefined on a fresh kernel).\n",
    "# Also narrowed the bare `except Exception` to StopIteration, which is\n",
    "# what the iterator raises at the end of an epoch.\n",
    "try:\n",
    "    batch = valid_data.next()\n",
    "except StopIteration:\n",
    "    valid_data.reset()\n",
    "    batch = valid_data.next()\n",
    "data = batch.data[0].as_in_context(ctx)\n",
    "label = batch.label[0].as_in_context(ctx)\n",
    "tic = time.time()\n",
    "anchors, box_preds, cls_preds = net(data)\n",
    "print(time.time() - tic)\n",
    "cls_probs = nd.SoftmaxActivation(cls_preds.transpose((0, 2, 1)), mode='channel')\n",
    "out = MultiBoxDetection(cls_probs, box_preds, anchors, force_suppress=True, clip=False, nms_threshold=0.2)\n",
    "box_offset, box_mask, cls_labels = MultiBoxTarget(anchors, label, \n",
    "                                                  cls_preds.transpose(axes=(0, 2, 1)), negative_mining_ratio=3)\n",
    "\n",
    "# Undo the iterator's normalization to recover displayable RGB images.\n",
    "imgs = (data.transpose((0, 2, 3, 1)).asnumpy() * std)+ rgb_mean\n",
    "#plt.imshow(imgs[0] / 255)\n",
    "show_det_results(imgs, out, threshold=0.4, show_text=False, MN=(2, 4))"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.5.2"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
