import os

os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
from visdom import Visdom
# os.environ["MXNET_ENGINE_TYPE"] = "NaiveEngine"
from lib.config import get_default_config
from models.ml_resnet101_ori import RFCN_Resnet as RFCN_Resnet_GL
from lib.data.coco import COCODetection
import mxnet.autograd as ag
import mxnet as mx
import numpy as np
import mxnet.ndarray as nd
import logging
import tqdm
import gluoncv, cv2
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from lib.nms.nms import gpu_nms_wrapper
from lib.data.anchor_loader import AnchorDataset
from mxnet.gluon.data import DataLoader
import matplotlib.pyplot as plt
from gluoncv.utils.parallel import DataParallelModel
from itertools import chain
from pprint import pprint
from lib.common import log_init,shortname
import time
from lib.transforms.bbox import bbox_image_pad



def getTrainDataLoader(*args, batch_size=8, shuffle=True, num_workers=16, **kwargs):
    """Build the training DataLoader over an AnchorDataset.

    All positional/keyword arguments are forwarded to ``AnchorDataset``.
    The loader settings were previously hard-coded; they are now
    keyword-only parameters whose defaults preserve the old behavior.

    Parameters
    ----------
    batch_size : int
        Images per batch (default 8, as before).
    shuffle : bool
        Shuffle the dataset each epoch (default True, as before).
    num_workers : int
        Worker processes for data loading (default 16, as before).

    Returns
    -------
    mxnet.gluon.data.DataLoader
        Loader that discards the trailing partial batch (``last_batch="discard"``),
        so every batch has exactly ``batch_size`` samples.
    """
    dataset = AnchorDataset(*args, **kwargs)
    return DataLoader(dataset=dataset, batch_size=batch_size, shuffle=shuffle,
                      num_workers=num_workers, last_batch="discard")


def getEvalDataLoader(bbox_dataset):
    """Wrap *bbox_dataset* in a sequential, one-image-per-batch DataLoader for evaluation."""
    loader = DataLoader(dataset=bbox_dataset, batch_size=1, shuffle=False)
    return loader


def validate(net, nms_wrapper, eval_dataset, device_id, num_images=50, img_width=800.0):
    """Evaluate the detector on the first *num_images* validation images.

    Each image is resized so its width becomes *img_width*, mean-subtracted
    (BGR channel means), converted BGR->RGB and HWC->CHW, then run through
    ``net`` and ``net.post_process``. Detections are accumulated and scored
    with the dataset's COCO evaluator.

    Parameters
    ----------
    net : network exposing ``__call__`` and ``post_process``
    nms_wrapper : NMS callable passed through to ``post_process``
    eval_dataset : dataset exposing ``at_with_image_path`` and
        ``coco_detection_evaluate``
    device_id : int
        GPU ordinal to run inference on.
    num_images : int
        How many validation images to evaluate (was hard-coded to 50).
    img_width : float
        Target image width for resizing (was hard-coded to 800.0).

    Returns
    -------
    float
        COCO AP@[.5:.95] (``stats[0]`` of the evaluation result).
    """
    images2val = [eval_dataset.at_with_image_path(i)[0] for i in range(num_images)]
    detection_result = []
    # Per-channel BGR means, shaped (1, 1, 3) to broadcast over H and W.
    # Loop-invariant, so built once outside the loop.
    mean = np.array([103.06, 115.90, 123.15])[np.newaxis, np.newaxis]

    for img_name in tqdm.tqdm(images2val):
        ori_img = cv2.imread(img_name)
        fscale = img_width / ori_img.shape[1]
        img_resized = cv2.resize(ori_img, (0, 0), fx=fscale, fy=fscale)

        img_float = img_resized.astype(np.float32)
        img_float -= mean
        img_float = img_float[:, :, (2, 1, 0)]  # BGR -> RGB
        img_float = np.transpose(img_float, (2, 0, 1))  # HWC -> CHW
        data = mx.nd.array(img_float[np.newaxis], ctx=mx.gpu(device_id))

        # im_info: [height, width, 3] of the network input.
        im_info = nd.array([[data.shape[2], data.shape[3], 3]], ctx=mx.gpu(device_id))
        rois, cls_score, bbox_pred = net(img_resized[np.newaxis], data, im_info, None)
        pred_bboxes, pred_scores, pred_labels = net.post_process(
            rois, bbox_pred, cls_score, nms_wrapper, data.shape, scale=fscale)
        for bbox, score, label in zip(pred_bboxes, pred_scores, pred_labels):
            # One row per detection: [image short name, x1, y1, x2, y2, label, score].
            detection_result.append([shortname(img_name)] + list(bbox) + [label, score])
    result = eval_dataset.coco_detection_evaluate(detection_result)
    return result.stats[0]
def single_forward(net, nms_wrapper, ori_img, device_id=0, img_width=1024.0):
    """Run one detection forward pass on a single BGR image.

    The image is scaled so its width equals *img_width*, mean-subtracted,
    reordered BGR->RGB and HWC->CHW, then fed through ``net``. Results are
    post-processed back into the original image's coordinate frame.

    Returns a ``(pred_bboxes, pred_scores, pred_labels)`` tuple.
    """
    height, width, channels = ori_img.shape
    scale = img_width / width
    resized = cv2.resize(ori_img, (0, 0), fx=scale, fy=scale)

    # Normalize: subtract per-channel BGR means, swap to RGB, move to CHW.
    blob = resized.astype(np.float32)
    blob -= np.array([103.06, 115.90, 123.15])[np.newaxis, np.newaxis]
    blob = np.transpose(blob[:, :, (2, 1, 0)], (2, 0, 1))
    data = mx.nd.array(blob[np.newaxis], ctx=mx.gpu(device_id))

    # im_info holds the network-input height/width plus a channel count.
    im_info = nd.array([[data.shape[2], data.shape[3], 3]], ctx=mx.gpu(device_id))
    rcnn_rois, rcnn_cls_prob, rcnn_bbox_pred = net(resized[np.newaxis], data, im_info, None)
    return net.post_process(rcnn_rois, rcnn_bbox_pred, rcnn_cls_prob, nms_wrapper,
                            [channels, height, width], scale=scale)


def main(do_eval=False, gpu_ids=None):
    """Train the R-FCN ResNet-101 detector on COCO train2014.

    Configures paths/hyper-parameters, builds the model and data pipeline,
    and runs the training loop with periodic visualization (Visdom),
    validation and checkpointing.

    Parameters
    ----------
    do_eval : bool
        Kept for interface compatibility; validation currently runs
        unconditionally every 1000 steps regardless of this flag.
    gpu_ids : list of int, optional
        GPUs to train on; defaults to [5, 6, 7, 8] (the previous
        hard-coded value).
    """
    if gpu_ids is None:
        gpu_ids = [5, 6, 7, 8]
    viz = Visdom(port=8098)

    ctx_list = [mx.gpu(x) for x in gpu_ids]
    cfg = get_default_config(network_stride=[16, 16])
    cfg.dataset.NUM_CLASSES = 81  # 80 COCO classes + background
    cfg.dataset.annFile = "../../dataset/coco/annotations/instances_train2014.json"
    cfg.dataset.image_root = "../../dataset/coco/images/train2014"
    cfg.dataset.val_annFile = "../../dataset/coco/annotations/instances_val2014.json"
    cfg.dataset.val_image_root = "../../dataset/coco/images/val2014"
    cfg.TRAIN.ENABLE_OHEM = True
    cfg.pretrained = "pretrained/resnet_v1_101-0000.params"  # ResNet-101 backbone weights
    cfg.resume = "output/coco_rfcn/roipooling_1_6400.params"
    cfg.TRAIN_VIZ_ENV = "coco_ori"
    cfg.TEST.KEEP_THRESHOLD = .1
    cfg.TRAIN.VIZ_RPN = False
    # NOTE(review): "DIDSPLAY" is a typo, but it is kept because the key may
    # be declared with this spelling in the config schema; renaming it only
    # here could break attribute lookup on a strict config object.
    cfg.TRAIN.DIDSPLAY_INTERVAL = 25
    cfg.LOG_DIR = "output/coco_rfcn/"

    # Stdlib call instead of shelling out via `os.system("mkdir -p ...")`.
    os.makedirs(cfg.LOG_DIR, exist_ok=True)
    log_init(filename="{}/log_{}.log".format(cfg.LOG_DIR, time.time()))
    pprint(cfg)
    net_single_gpu = RFCN_Resnet_GL(cfg=cfg, is_train=True, pretrained=False, viz_env=cfg.TRAIN_VIZ_ENV)
    print(net_single_gpu.collect_params().keys())
    net = DataParallelModel(net_single_gpu, ctx_list=ctx_list)

    val_dataset = COCODetection(anno_path=cfg.dataset.val_annFile, image_root=cfg.dataset.val_image_root)
    train_dataset = COCODetection(cfg.dataset.annFile, cfg.dataset.image_root)
    train_transform = bbox_image_pad(dest_shape=(800, 800))  # pad/resize to 800x800
    train_loader = getTrainDataLoader(bbox_dataset=train_dataset, cfg=cfg, transform=train_transform)

    nms_wrapper = gpu_nms_wrapper(thresh=0.1, device_id=0)

    # Select trainable parameters: freeze all BN scale/shift (beta/gamma)
    # plus anything whose name starts with a prefix in cfg.FIXED_PARAMS.
    params = dict(net.module.collect_params())
    params_train = {}
    for key in params.keys():
        is_train = True
        if "beta" in str(key) or "gamma" in str(key):
            is_train = False
            logging.info(key)
        else:
            for fixed in cfg.FIXED_PARAMS:
                if str(key).startswith(fixed):
                    is_train = False
                    logging.info(key)
        if is_train:
            params_train[key] = params[key]
    logging.info(params_train.keys())
    # BUG FIX: the Trainer was previously built over the full `params` dict,
    # so the freezing filter above had no effect; pass `params_train` so the
    # frozen parameters are actually excluded from optimization.
    trainer = mx.gluon.Trainer(params_train,
                               'adam',
                               {'learning_rate': 1e-4,
                                'clip_gradient': None,
                                'multi_precision': True
                                },
                               )
    net.module.collect_params().zero_grad()
    trained_steps = 0
    for nepoch in range(80):

        # Loss accumulators, reset after every DIDSPLAY_INTERVAL batches.
        batch_loss_rpn_cls = 0
        batch_loss_rpn_bbox = 0
        batch_loss_cls = 0
        batch_loss_bbox = 0

        logging.info("[Epoch {}] steps={} lr={}".format(nepoch, trained_steps, trainer.learning_rate))
        for nbatch, batch in enumerate(train_loader):
            try:
                # Step LR schedule: 1e-4 -> 1e-5 after epoch 3 -> 1e-6 after epoch 6.
                if nepoch > 3:
                    trainer.set_learning_rate(1e-5)
                if nepoch > 6:
                    trainer.set_learning_rate(1e-6)
                batch = [mx.nd.array(x, ctx=mx.cpu()) for x in batch]
                batch_ori_img, img, im_info, rpn_cls_label, rpn_cls_weight, rpn_bbox_target, rpn_bbox_weight, gt_boxes = batch
                ori_img = batch_ori_img[0].asnumpy()  # first image, for periodic visualization
                label = {}
                label["rpn_cls_label"] = rpn_cls_label
                label['rpn_cls_weight'] = rpn_cls_weight
                label["rpn_bbox_target"] = rpn_bbox_target
                label["rpn_bbox_weight"] = rpn_bbox_weight
                label["gt_boxes"] = gt_boxes
                with ag.record(True):
                    # DataParallelModel returns one loss tuple per GPU:
                    # (rpn_cls, rpn_l1, rcnn_cls, rcnn_l1).
                    losses_all = \
                        net(batch_ori_img, img, im_info, label)
                    loss_rpn_cls = [x[0] for x in losses_all]
                    loss_rpn_l1 = [x[1] for x in losses_all]
                    rcnn_loss_cls = [x[2] for x in losses_all]
                    rcnn_loss_l1 = [x[3] for x in losses_all]

                ag.backward(loss_rpn_cls + loss_rpn_l1 + rcnn_loss_cls + rcnn_loss_l1)
                trainer.step(1, ignore_stale_grad=False)
                trained_steps += 1
                batch_loss_rpn_cls += sum(mx.nd.sum(x).asscalar() for x in loss_rpn_cls)
                batch_loss_rpn_bbox += sum(mx.nd.sum(x).asscalar() for x in loss_rpn_l1)
                batch_loss_cls += sum(mx.nd.sum(x).asscalar() for x in rcnn_loss_cls)
                batch_loss_bbox += sum(mx.nd.sum(x).asscalar() for x in rcnn_loss_l1)

                if nbatch % cfg.TRAIN.DIDSPLAY_INTERVAL == 0:
                    # Visualization/logging is best-effort: failures here must
                    # not abort training, hence the broad catch + log below.
                    try:
                        pred_bboxes, pred_scores, pred_labels = single_forward(
                            net_single_gpu, nms_wrapper, ori_img, device_id=gpu_ids[0], img_width=768)

                        gluoncv.utils.viz.plot_bbox(ori_img, bboxes=pred_bboxes, scores=pred_scores, thresh=0.01,
                                                    class_names=COCODetection.CLASSES, labels=pred_labels)
                        plt.gcf().set_size_inches(w=15, h=15)
                        viz.matplot(plt, win="test result", env=cfg.TRAIN_VIZ_ENV)
                        plt.close()
                        logging.info(
                            "EPOCH[{0}] BATCH[{1}] RPNClsLoss={2:.4f} RPNL1Loss={3:.4f}, CLSLoss={4:.4f} L1Loss={5:.4f}".format(
                                nepoch,
                                nbatch,
                                batch_loss_rpn_cls / cfg.TRAIN.DIDSPLAY_INTERVAL,
                                batch_loss_rpn_bbox / cfg.TRAIN.DIDSPLAY_INTERVAL,
                                batch_loss_cls / cfg.TRAIN.DIDSPLAY_INTERVAL,
                                batch_loss_bbox / cfg.TRAIN.DIDSPLAY_INTERVAL,

                            ))
                        batch_loss_rpn_cls = 0
                        batch_loss_rpn_bbox = 0
                        batch_loss_cls = 0
                        batch_loss_bbox = 0
                    except Exception as e:
                        logging.exception(e)
                if trained_steps % 1000 == 0 or trained_steps == 1:
                    score = validate(net_single_gpu, nms_wrapper, eval_dataset=val_dataset, device_id=gpu_ids[0])
                    net.module.collect_params().save(
                        cfg.LOG_DIR + "/weights-%d-%d-[%.4f].params" % (nepoch, nbatch, score))
                    # BUG FIX: trainer state was written to a hard-coded
                    # "output/coco_ori/" path (a directory this script never
                    # creates) via a .format() call with no placeholders;
                    # save it alongside the weights under LOG_DIR instead.
                    trainer.save_states(cfg.LOG_DIR + "/trainer-state.params")
            except AssertionError as e:
                # Data/shape assertion failures skip the batch instead of
                # killing the whole run.
                logging.exception(e)

# Script entry point: start training with the default configuration.
if __name__ == "__main__":
    main()