{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Detectron2 Training\n",
    "\n",
    "Pseudo-code of what this notebook does\n",
    "\n",
    "```\n",
    "for train_json in train_jsons:\n",
    "    for _ in range(n):\n",
    "        run training on train_json\n",
    "        for test_json in test_jsons:\n",
    "            run evaluation\n",
    "        report average AP50 on the test_jsons\n",
    "\n",
    "```\n",
    "\n",
    "For evaluation, we split the test set into 3 possibly overlapping subsets, and this becomes the list of `test_jsons` the model is evaluated on. \n",
    "\n",
    "We also run the training and evaluation loop for each `train_json` n times to check the variance in the setup. "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "coco_yaml = \"COCO-InstanceSegmentation/mask_rcnn_R_50_FPN_3x.yaml\"\n",
    "lvis_yaml = \"LVIS-InstanceSegmentation/mask_rcnn_R_50_FPN_1x.yaml\"\n",
    "lvis_yaml2 = \"LVIS-InstanceSegmentation/mask_rcnn_R_101_FPN_1x.yaml\"\n",
    "pano_yaml = \"COCO-PanopticSegmentation/panoptic_fpn_R_50_3x.yaml\"\n",
    "\n",
    "img_dir_test = '/checkpoint/apratik/ActiveVision/active_vision/replica_random_exploration_data/frl_apartment_1/rgb'\n",
    "test_jsons = ['active_vision/frlapt1_20n0.json', 'active_vision/frlapt1_20n1.json', 'active_vision/frlapt1_20n2.json']\n",
    "\n",
    "img_dir_train = '/checkpoint/apratik/finals/straightline/apartment_0/rgb'\n",
    "\n",
    "# Sanity checking: subsets of the training set.\n",
    "# train_jsons = [\n",
    "#     'active_vision/sanity_default_apt0_1n.json',\n",
    "#     'active_vision/sanity_default_apt0_10n.json',\n",
    "#     'active_vision/sanity_default_apt0_20n.json',\n",
    "#     'active_vision/sanity_default_apt0_30n.json',\n",
    "#     'active_vision/sanity_default_apt0_40n.json',\n",
    "#     'active_vision/sanity_default_apt0_50n.json',\n",
    "#     'active_vision/sanity_default_apt0_60n.json',\n",
    "#     'active_vision/sanity_default_apt0_70n.json',\n",
    "#     'active_vision/sanity_default_apt0_80n.json',\n",
    "# ]\n",
    "\n",
    "# Table 2 - prop fixed, label prop, different GT frames, default heu\n",
    "# train_jsons = [\n",
    "#     'active_vision/default_apt0_gt50p2fix_corln.json',\n",
    "#     'active_vision/default_apt0_gt100p2fix_corln.json',\n",
    "#     'active_vision/default_apt0_gt150p2fix_corln.json',\n",
    "#     'active_vision/default_apt0_gt200p2fix_corln.json',\n",
    "#     'active_vision/default_apt0_gt250p2fix_corln.json',\n",
    "# ]\n",
    "\n",
    "# Table 2 - prop fixed, no label prop, different GT frames\n",
    "# train_jsons = [\n",
    "#     'active_vision/base_straightline_apt0_gt50p2fix_corln.json',\n",
    "#     'active_vision/base_straightline_apt0_gt100p2fix_corln.json',\n",
    "#     'active_vision/base_straightline_apt0_gt150p2fix_corln.json',\n",
    "#     'active_vision/base_straightline_apt0_gt200p2fix_corln.json',\n",
    "#     'active_vision/base_straightline_apt0_gt250p2fix_corln.json',\n",
    "# ]\n",
    "\n",
    "# Table 2 - prop fixed, label prop, different GT frames\n",
    "# train_jsons = [\n",
    "#     'active_vision/straightline_apt0_gt5p2fix_corlnn.json',\n",
    "#     'active_vision/straightline_apt0_gt10p2fix_corlnn.json',\n",
    "#     'active_vision/straightline_apt0_gt15p2fix_corlnn.json',\n",
    "#     'active_vision/straightline_apt0_gt20p2fix_corlnn.json',\n",
    "#     'active_vision/straightline_apt0_gt25p2fix_corlnn.json',\n",
    "# ]\n",
    "\n",
    "train_jsons=[f'active_vision/straightline_apt0_gt{x}p2_rand_{y}.json' for x in range(5,30,5) for y in range(3)]\n",
    "\n",
    "# train_jsons = [f'active_vision/straightline_apt0_gt{x}p2fix_corlnn.json' for x in range(5, 30, 5)]\n",
    "\n",
    "# train_jsons = [f'active_vision/straightline_apt0_gt10p{x}_h1nn.json' for x in range(2, 10, 2)]\n",
    "\n",
    "# Table 1 - gt fixed, different label prop lengths\n",
    "# train_jsons = [\n",
    "#     'active_vision/straightline_apt0_gt100p1_corln.json',\n",
    "#     'active_vision/straightline_apt0_gt100p2_corln.json',\n",
    "#     'active_vision/straightline_apt0_gt100p4_corln.json',\n",
    "#     'active_vision/straightline_apt0_gt100p6_corln.json',\n",
    "# ]\n",
    "\n",
    "# train_jsons = [\n",
    "#     'active_vision/default_apt0_gt100p1_corln.json',\n",
    "#     'active_vision/default_apt0_gt100p2_corln.json',\n",
    "#     'active_vision/default_apt0_gt100p4_corln.json',\n",
    "#     'active_vision/default_apt0_gt100p6_corln.json',\n",
    "# ]\n",
    "    \n",
    "\n",
    "dataset_name = 'habitat_1'"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import torchvision\n",
    "\n",
    "# import some common libraries\n",
    "import numpy as np\n",
    "import sys\n",
    "if '/opt/ros/kinetic/lib/python2.7/dist-packages' in sys.path:\n",
    "    sys.path.remove('/opt/ros/kinetic/lib/python2.7/dist-packages')\n",
    "\n",
    "import cv2\n",
    "import detectron2\n",
    "from detectron2.utils.logger import setup_logger\n",
    "setup_logger()\n",
    "import random\n",
    "import os\n",
    "import numpy as np\n",
    "import json\n",
    "\n",
    "# import some common detectron2 utilities\n",
    "from detectron2 import model_zoo\n",
    "from detectron2.engine import DefaultPredictor\n",
    "from detectron2.config import get_cfg\n",
    "from detectron2.utils.visualizer import Visualizer, ColorMode\n",
    "from detectron2.config import get_cfg\n",
    "from detectron2.engine import DefaultTrainer\n",
    "from detectron2.config import CfgNode as CN\n",
    "from detectron2.data import DatasetCatalog, MetadataCatalog\n",
    "from detectron2.data import DatasetMapper, build_detection_train_loader\n",
    "from detectron2.data.datasets import register_coco_instances\n",
    "from detectron2.evaluation import COCOEvaluator, inference_on_dataset\n",
    "from detectron2.data import build_detection_test_loader, build_detection_train_loader\n",
    "import detectron2.data.transforms as T\n",
    "import shutil\n",
    "from setuptools.namespaces import flatten\n",
    "\n",
    "import random\n",
    "import torch \n",
    "import base64\n",
    "import io\n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "import pandas as pd\n",
    "from ast import literal_eval\n",
    "from PIL import Image\n",
    "\n",
    "pd.set_option('max_colwidth', 300)\n",
    "\n",
    "import glob\n",
    "from IPython.core.display import display, HTML\n",
    "\n",
    "pd.set_option('max_colwidth', 300)\n",
    "matplotlib.rcParams['figure.figsize'] = (20, 7.0)\n",
    "\n",
    "display(HTML(\n",
    "    \"\"\"\n",
    "    <style>\n",
    "    .container { width:100% !important; }\n",
    "    #notebook { letter-spacing: normal !important;; }\n",
    "    .CodeMirror { font-family: monospace !important; }\n",
    "    .cm-keyword { font-weight: bold !important; color: #008000 !important; }\n",
    "    .cm-comment { font-style: italic !important; color: #408080 !important; }\n",
    "    .cm-operator { font-weight: bold !important; color: #AA22FF !important; }\n",
    "    .cm-number { color: #080 !important; }\n",
    "    .cm-builtin { color: #008000 !important; }\n",
    "    .cm-string { color: #BA2121 !important; }\n",
    "    </style>\n",
    "    \"\"\"\n",
    "))\n",
    "\n",
    "\n",
    "## Detectron2 Setup\n",
    "\n",
    "from copy_paste import CopyPaste\n",
    "import albumentations as A\n",
    "\n",
    "class Trainer(DefaultTrainer):\n",
    "    \"\"\"DefaultTrainer with a custom train-time augmentation pipeline.\"\"\"\n",
    "#     @classmethod\n",
    "#     def build_evaluator(cls, cfg, dataset_name, output_folder=None):\n",
    "#         if output_folder is None:\n",
    "#             output_folder = os.path.join(cfg.OUTPUT_DIR, \"inference\")\n",
    "#         return COCOEvaluator(dataset_name, output_dir=output_folder)\n",
    "    \n",
    "    @classmethod\n",
    "    def build_train_loader(cls, cfg):\n",
    "        # Augmentations applied per image: resize shortest edge, random\n",
    "        # horizontal flip, fixed 640x640 crop, and mild brightness jitter.\n",
    "        mapper = DatasetMapper(cfg, is_train=True, augmentations=[\n",
    "            T.ResizeShortestEdge(short_edge_length=cfg.INPUT.MIN_SIZE_TRAIN, max_size=1333, sample_style='choice'),\n",
    "            T.RandomFlip(prob=0.5),\n",
    "            T.RandomCrop(\"absolute\", (640, 640)),\n",
    "            T.RandomBrightness(0.9, 1.1)\n",
    "        ])\n",
    "        return build_detection_train_loader(cfg, mapper=mapper)\n",
    "\n",
    "class COCOTrain:\n",
    "    \"\"\"Trains Mask R-CNN on one COCO-format train_json and accumulates AP50 results.\"\"\"\n",
    "    def __init__(self, lr, w, maxiters):\n",
    "        # Start from the model-zoo COCO Mask R-CNN config and override solver params.\n",
    "        self.cfg = get_cfg()\n",
    "        self.cfg.merge_from_file(model_zoo.get_config_file(coco_yaml))\n",
    "        self.cfg.SOLVER.BASE_LR = lr  # pick a good LR\n",
    "        self.cfg.SOLVER.MAX_ITER = maxiters\n",
    "        self.cfg.SOLVER.WARMUP_ITERS = w\n",
    "        \n",
    "    def reset(self, train_json, img_dir_train, dataset_name):\n",
    "        \"\"\"Clear the dataset catalogs, register the training set, and zero out results.\"\"\"\n",
    "        # Clearing both catalogs lets the same dataset names be re-registered\n",
    "        # on repeated runs without detectron2 raising 'already registered'.\n",
    "        DatasetCatalog.clear()\n",
    "        MetadataCatalog.clear()\n",
    "        self.train_data = dataset_name +  \"_train\"\n",
    "        self.train_json = train_json\n",
    "        register_coco_instances(self.train_data, {}, train_json, img_dir_train)\n",
    "        # AP50 lists are appended to by run_eval, once per test_json.\n",
    "        self.results = {\n",
    "            \"bbox\": {\n",
    "                \"AP50\": []\n",
    "            },\n",
    "            \"segm\": {\n",
    "                \"AP50\": []\n",
    "            }\n",
    "        }\n",
    "    \n",
    "    def vis(self):\n",
    "        \"\"\"Show ground-truth annotations for 2 random training images as a sanity check.\"\"\"\n",
    "        dataset_dicts = DatasetCatalog.get(self.train_data)\n",
    "        for d in random.sample(dataset_dicts, 2):\n",
    "            img = cv2.imread(d[\"file_name\"])\n",
    "            visualizer = Visualizer(img[:, :, ::-1], metadata=MetadataCatalog.get(self.train_data), scale=0.5)\n",
    "            vis = visualizer.draw_dataset_dict(d)\n",
    "            img = vis.get_image()\n",
    "            plt.figure(figsize=(12,8))\n",
    "            plt.imshow(img)\n",
    "            plt.show()\n",
    "            \n",
    "    def train(self):\n",
    "        \"\"\"Finalize the config, (re)create the output dir, and run training on self.train_data.\"\"\"\n",
    "        cfg = self.cfg\n",
    "        print(f'SOLVER PARAMS {cfg.SOLVER.MAX_ITER, cfg.SOLVER.WARMUP_ITERS, cfg.SOLVER.BASE_LR}')\n",
    "        cfg.DATASETS.TRAIN = (self.train_data,)\n",
    "        cfg.DATASETS.TEST = ()\n",
    "        cfg.DATALOADER.NUM_WORKERS = 2\n",
    "        cfg.MODEL.WEIGHTS = model_zoo.get_checkpoint_url(coco_yaml)  # Let training initialize from model zoo\n",
    "        cfg.SOLVER.IMS_PER_BATCH = 2\n",
    "        cfg.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 128 \n",
    "        cfg.MODEL.ROI_HEADS.NUM_CLASSES = len(MetadataCatalog.get(self.train_data).get(\"thing_classes\"))  \n",
    "        # BUGFIX: derive the output dir from self.train_json (set in reset()),\n",
    "        # not from the notebook-global loop variable `x` — the old code broke\n",
    "        # on a fresh kernel and silently used the wrong json if `x` was stale.\n",
    "        cfg.OUTPUT_DIR = os.path.join('output_aug', str(cfg.SOLVER.MAX_ITER), self.train_json.split('.')[0][len('active_vision/'):])\n",
    "        print(f\"recreating {cfg.OUTPUT_DIR}\")\n",
    "        # Remove any previous run's outputs so checkpoints/metrics don't mix.\n",
    "        if os.path.isdir(cfg.OUTPUT_DIR):\n",
    "            shutil.rmtree(cfg.OUTPUT_DIR)\n",
    "        print(cfg.OUTPUT_DIR)\n",
    "        os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)\n",
    "        self.trainer = Trainer(cfg) #DefaultTrainer(cfg) \n",
    "        self.trainer.resume_or_load(resume=False)\n",
    "        self.trainer.train()\n",
    "\n",
    "    def run_eval(self, dataset_name, test_json, img_dir_test):\n",
    "        \"\"\"Register test_json, run COCO bbox+segm evaluation, and append the AP50s to self.results.\"\"\"\n",
    "        self.val_data = dataset_name + \"_val\"\n",
    "        self.test_json = test_json\n",
    "        self.cfg.DATASETS.TEST = (self.val_data,)\n",
    "        register_coco_instances(self.val_data, {}, test_json, img_dir_test)\n",
    "        \n",
    "        self.evaluator = COCOEvaluator(self.val_data, (\"bbox\", \"segm\"), False, output_dir=self.cfg.OUTPUT_DIR)\n",
    "        self.val_loader = build_detection_test_loader(self.cfg, self.val_data)\n",
    "        # Uses the model held by the trainer created in train(); train() must run first.\n",
    "        results = inference_on_dataset(self.trainer.model, self.val_loader, self.evaluator)\n",
    "        self.results['bbox']['AP50'].append(results['bbox']['AP50'])\n",
    "        self.results['segm']['AP50'].append(results['segm']['AP50'])\n",
    "        \n",
    "    def run_train(self, train_json, img_dir_train, dataset_name):\n",
    "        \"\"\"Reset state, visualize a sample of the training data, then train.\"\"\"\n",
    "        self.reset(train_json, img_dir_train, dataset_name)\n",
    "        self.vis()\n",
    "        self.train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "\n",
    "maxiters = 500\n",
    "lr = [0.001, 0.002, 0.005, 0.01, 0.02]\n",
    "warmup = [100, 200]\n",
    "\n",
    "def write_summary_to_file(filename, results, header_str):\n",
    "    \"\"\"Append header_str, mean bbox/segm AP50, and the raw results dict to filename.\n",
    "\n",
    "    NOTE: mutates `results` in place — nested per-run AP50 lists are flattened\n",
    "    before averaging, so the caller's dict is changed after this returns.\n",
    "    \"\"\"\n",
    "    if isinstance(results['bbox']['AP50'][0], list):\n",
    "        results['bbox']['AP50'] = list(flatten(results['bbox']['AP50']))\n",
    "        results['segm']['AP50'] = list(flatten(results['segm']['AP50']))\n",
    "    with open(filename, \"a\") as f:\n",
    "        f.write(header_str)\n",
    "        f.write(f\"\\nbbox AP50 {sum(results['bbox']['AP50'])/len(results['bbox']['AP50'])}\")\n",
    "        f.write(f\"\\nsegm AP50 {sum(results['segm']['AP50'])/len(results['segm']['AP50'])}\")\n",
    "        f.write(f'\\nall results {results}')\n",
    "            \n",
    "def main_loop(train_json, n):\n",
    "    \"\"\"Train on train_json n times; after each run, evaluate on every test_json.\n",
    "\n",
    "    Writes a per-run summary to the granular file and an average-over-n\n",
    "    summary to the averaged file.\n",
    "    \"\"\"\n",
    "    results = {\n",
    "        \"bbox\": {\n",
    "            \"AP50\": []\n",
    "        },\n",
    "        \"segm\": {\n",
    "            \"AP50\": []\n",
    "        }\n",
    "    }\n",
    "    for _ in range(n):\n",
    "        c = COCOTrain(lr[0], warmup[0], maxiters)\n",
    "        # BUGFIX: derive dataset_name from the train_json parameter, not the\n",
    "        # notebook-global loop variable `x` — main_loop now works standalone.\n",
    "        dataset_name = train_json.split('.')[0][len('active_vision/'):]\n",
    "        print(f'dataset_name {dataset_name}')\n",
    "        c.run_train(train_json, img_dir_train, dataset_name)\n",
    "        for yix in range(len(test_jsons)):\n",
    "            c.run_eval(str(yix), test_jsons[yix], img_dir_test)\n",
    "        print(f'all results {c.results}')\n",
    "        results['bbox']['AP50'].append(c.results['bbox']['AP50'])\n",
    "        results['segm']['AP50'].append(c.results['segm']['AP50'])\n",
    "        # BUGFIX: label the granular summary with train_json, not global `x`.\n",
    "        write_summary_to_file('active_random_sample_granular.txt', c.results, f'\\ntrain_json {train_json}')\n",
    "\n",
    "    write_summary_to_file('active_random_sample_averaged.txt', results, f'\\ntrain_json {train_json}, average over {n} runs')\n",
    "   \n",
    "            \n",
    "for x in train_jsons:\n",
    "    main_loop(x, 10)\n",
    "    "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "loco",
   "language": "python",
   "name": "loco"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.7.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
