'''
@author: kohill
This script trains and evaluates R-FCN on the VOC dataset.
Baselines for comparison:
1. faster-rcnn.pytorch, trained on VOC2007 trainval and tested on VOC2007 test:
model  	#GPUs	batch size	lr      	lr_decay	max_epoch    	time/epoch	mem/GPU	mAP
Res-101  8	    24	        1e-2	    10	        12	            0.17 hr	    10327MB	75.1

2.
'''

import os

os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
from visdom import Visdom
# os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
from lib.config import get_default_config
from models.ml_resnet101_ori import RFCN_Resnet as RFCN_Resnet_GL
from lib.data.voc import VOCDetection
import mxnet.autograd as ag
import mxnet as mx
import numpy as np
import mxnet.ndarray as nd
import logging
import tqdm
import gluoncv, cv2
from lib.nms.nms import gpu_nms_wrapper
from lib.data.anchor_loader import AnchorDataset
from mxnet.gluon.data import DataLoader
import matplotlib.pyplot as plt
from gluoncv.utils.parallel import DataParallelModel
from pprint import pprint
from lib.common import log_init
import time
from lib.transforms.bbox import bbox_image_pad

from gluoncv.utils.metrics.voc_detection import VOC07MApMetric


def getTrainDataLoader(*args, **kwargs):
    """Build a shuffling training loader over an AnchorDataset.

    All arguments are forwarded to ``AnchorDataset``. The loader uses a
    fixed batch size of 4 with 16 worker processes and drops the last
    incomplete batch.
    """
    return DataLoader(dataset=AnchorDataset(*args, **kwargs),
                      batch_size=4,
                      shuffle=True,
                      num_workers=16,
                      last_batch="discard")


def getEvalDataLoader(bbox_dataset):
    """Build a sequential (unshuffled), batch-size-1 loader for evaluation."""
    loader = DataLoader(dataset=bbox_dataset, batch_size=1, shuffle=False)
    return loader


def validate(net, nms_wrapper, eval_dataset, eval_metric, device_id):
    """Run detection over ``eval_dataset`` and return the final mAP.

    Each image is scaled so its long side becomes 800 px, padded to a
    multiple of 16, normalized (BGR mean subtraction, BGR->RGB, HWC->CHW),
    and forwarded through ``net``; predictions are accumulated into
    ``eval_metric``.

    Parameters
    ----------
    net : detection network exposing ``__call__`` and ``post_process``.
    nms_wrapper : NMS callable forwarded to ``net.post_process``.
    eval_dataset : iterable of (image, gt_boxes); gt_boxes columns are
        assumed to be (x0, y0, x1, y1, label, difficult) -- TODO confirm
        against lib.data.voc.VOCDetection.
    eval_metric : VOC-style metric with reset/update/get.
    device_id : GPU id used for the forward pass.

    Returns
    -------
    float
        The 'mAP' entry of the metric's final result.
    """
    # Hoisted out of the loop: the import, the pad transform (assumed
    # stateless -- TODO confirm), and the per-channel BGR mean constant.
    from lib.transforms.bbox import bbox_image_pad_n
    pad16 = bbox_image_pad_n(n=16)
    mean = np.array([103.06, 115.90, 123.15])[np.newaxis, np.newaxis]

    eval_metric.reset()
    for n, (ori_img, gt_boxes) in enumerate(tqdm.tqdm(eval_dataset)):
        if isinstance(gt_boxes, mx.nd.NDArray):
            gt_boxes = gt_boxes.asnumpy()
        if isinstance(ori_img, mx.nd.NDArray):
            ori_img = ori_img.asnumpy()
        # Scale so the longer side becomes 800 px, then pad to a multiple of 16.
        fscale = 800.0 / max(ori_img.shape[:2])
        img_resized, _ = pad16(cv2.resize(ori_img, (0, 0), fx=fscale, fy=fscale), None)

        img_float = img_resized.astype(np.float32)
        img_float -= mean                                # subtract BGR mean
        img_float = img_float[:, :, (2, 1, 0)]           # BGR -> RGB
        img_float = np.transpose(img_float, (2, 0, 1))   # HWC -> CHW
        data = mx.nd.array(img_float[np.newaxis], ctx=mx.gpu(device_id))

        im_info = nd.array([[data.shape[2], data.shape[3], 3]], ctx=mx.gpu(device_id))
        rois, cls_score, bbox_pred = net(img_resized[np.newaxis], data, im_info,
                                         None, None, None, None, None)
        pred_bboxes, pred_scores, pred_labels = net.post_process(
            rois, bbox_pred, cls_score, nms_wrapper, data.shape, scale=fscale)
        eval_metric.update(pred_bboxes=pred_bboxes[np.newaxis],
                           pred_scores=pred_scores[np.newaxis],
                           pred_labels=pred_labels[np.newaxis],
                           gt_bboxes=gt_boxes[:, :4][np.newaxis],
                           gt_labels=gt_boxes[:, 4][np.newaxis],
                           gt_difficults=gt_boxes[:, 5][np.newaxis])
        # Periodically log intermediate results during long evaluation runs.
        if n % 1000 == 0:
            pprint(dict(zip(*eval_metric.get())))
    result = dict(zip(*eval_metric.get()))
    pprint(result)
    return result['mAP']


def single_forward(net, nms_wrapper, ori_img, device_id=0, img_width=1024.0):
    """Detect objects in one image and return (bboxes, scores, labels).

    The image is resized so its width becomes ``img_width``, normalized
    (BGR mean subtraction, BGR->RGB, HWC->CHW), and forwarded through
    ``net``; raw outputs are decoded by ``net.post_process``.
    """
    height, width, channels = ori_img.shape
    scale = img_width / ori_img.shape[1]
    resized = cv2.resize(ori_img, (0, 0), fx=scale, fy=scale)

    # Normalize: subtract the per-channel BGR mean, swap to RGB, make CHW.
    blob = resized.astype(np.float32)
    blob -= np.array([103.06, 115.90, 123.15])[np.newaxis, np.newaxis]
    blob = blob[:, :, (2, 1, 0)]
    blob = np.transpose(blob, (2, 0, 1))

    ctx = mx.gpu(device_id)
    data = mx.nd.array(blob[np.newaxis], ctx=ctx)
    im_info = nd.array([[data.shape[2], data.shape[3], 3]], ctx=ctx)

    rcnn_rois, rcnn_cls_prob, rcnn_bbox_pred = net(resized[np.newaxis], data, im_info, None)
    pred_bboxes, pred_scores, pred_labels = net.post_process(
        rcnn_rois, rcnn_bbox_pred, rcnn_cls_prob, nms_wrapper,
        [channels, height, width], scale=scale)
    return pred_bboxes, pred_scores, pred_labels


def main(do_eval=False):
    """Build the R-FCN model, load resumed weights, and evaluate on VOC2007 test.

    ``do_eval`` is currently unused; evaluation always runs.
    """
    device_ids = [8]
    viz = Visdom(port=8098)  # visdom connection for the model's viz environment

    contexts = [mx.gpu(i) for i in device_ids]
    cfg = get_default_config(network_stride=[16, 16], scales=[8, 16, 32])
    cfg.dataset.NUM_CLASSES = 21  # with background

    cfg.dataset.root = "/data1/zyx/VOC/voc_dataset"
    cfg.TRAIN.ENABLE_OHEM = True
    # cfg.pretrained = "/data1/zyx/yks/object_detection/Deformable-ConvNets/output/rfcn_dcn/voc/resnet_v1_101_voc0707_rfcn_dcn_end2end_ohem/2007_trainval/rfcn_voc-0007.params"  # load resnet-101 pretrained model.
    cfg.resume = "output/voc_rfcn/weights-19-1251-[0.6905].params"
    cfg.TRAIN_VIZ_ENV = "voc_ori"
    cfg.TEST.KEEP_THRESHOLD = 1e-5
    cfg.TRAIN.VIZ_RPN = False
    # NOTE(review): attribute name looks misspelled ("DISPLAY") but is kept
    # verbatim since lib.config may read exactly this key.
    cfg.TRAIN.DIDSPLAY_INTERVAL = 25
    cfg.LOG_DIR = "output/voc_rfcn/"

    pprint(cfg)
    single_gpu_net = RFCN_Resnet_GL(cfg=cfg, is_train=True, pretrained=True,
                                    viz_env=cfg.TRAIN_VIZ_ENV)
    parallel_net = DataParallelModel(single_gpu_net, ctx_list=contexts)

    train_dataset = VOCDetection(root=cfg.dataset.root, splits=((2007, 'trainval'),))
    val_dataset = VOCDetection(root=cfg.dataset.root, splits=((2007, 'test'),))

    nms_wrapper = gpu_nms_wrapper(thresh=0.3, device_id=0)

    eval_metric = VOC07MApMetric(class_names=val_dataset.classes)
    score = validate(single_gpu_net, nms_wrapper, eval_dataset=val_dataset,
                     device_id=device_ids[0], eval_metric=eval_metric)

    # Recorded results for this checkpoint:
    # "output/voc_rfcn/weights-19-1251-[0.6905].params" -> pad16 0.7092 (short side is 600)
    # "output/voc_rfcn/weights-19-1251-[0.6905].params" -> pad16 0.7088 (long side is 800)
# Script entry point.
if __name__ == "__main__":
    main()