from __future__ import print_function

import logging
import os,time
import pickle
import pprint
import cv2
import mxnet as mx
import mxnet.autograd as ag
import numpy as np
import tqdm
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric

import models.defomableconvnets.rfcn_metrics as metric
from data.bbox.voc import VOCDetection
from models.defomableconvnets.config import config, update_config
from models.defomableconvnets.resnet_v1_101_rfcn_dcn import resnet_v1_101_rfcn_dcn
from models.defomableconvnets.rfcnroidbdataiter import AnchorLoader
from utils.im_detect import im_detect_bbox_aug
from utils.lrsheduler import WarmupMultiFactorScheduler
from utils.parallel import DataParallelModel, parallel_backward
from utils.params import load_param
from utils.roidb import append_flipped_images
from utils.common import log_init

class RFCN_Resnet(mx.gluon.nn.HybridBlock):
    """R-FCN detector with deformable convolutions, backed by a pair of
    symbols (train / test) that share one parameter set.

    ``features`` wraps the training graph, which consumes the full set of
    supervision inputs; ``features_test`` wraps the inference graph (image
    plus im_info only) and is built on top of the training graph's
    parameters, so weights learned during training are used at test time.
    """

    # Default location of the ImageNet-pretrained ResNet-101 checkpoint.
    DEFAULT_PRETRAINED_PATH = "/media/kohill/data/kohill/pretrained/resnet_v1_101-0000.params"

    def __init__(self, sym, sym_test, cfg, resume=None, pretrained_path=None):
        """Build the block and initialize / load its parameters.

        Parameters
        ----------
        sym : mx.sym.Symbol
            Training symbol; expects data, im_info, gt_boxes, label,
            bbox_target and bbox_weight inputs.
        sym_test : mx.sym.Symbol
            Inference symbol; expects only data and im_info.
        cfg : config object
            Unused here; kept for interface compatibility with callers.
        resume : str or None
            Path of a saved ``.params`` checkpoint to resume from.  Silently
            skipped when the file does not exist (best-effort resume).
        pretrained_path : str or None
            Path of the ImageNet-pretrained backbone weights.  ``None``
            (the default) keeps the original hard-coded location,
            ``DEFAULT_PRETRAINED_PATH``.
        """
        super(RFCN_Resnet, self).__init__()
        input_names = ["data", "im_info", "gt_boxes", "label", "bbox_target", "bbox_weight"]
        self.features = mx.gluon.nn.SymbolBlock(
            sym,
            [mx.sym.Variable(x) for x in input_names]
        )
        # Reuse the training graph's parameters so both graphs stay in sync.
        self.features_test = mx.gluon.nn.SymbolBlock(
            sym_test,
            [mx.sym.Variable(x) for x in ["data", "im_info"]],
            params=self.features.collect_params()
        )
        net_params = self.features.collect_params()

        # Deformable-conv offset branches start at zero so the initial
        # sampling grid is the regular (undeformed) one.
        for stage in ("res5a", "res5b", "res5c"):
            for kind in ("weight", "bias"):
                offset_name = "{}_branch2b_offset_{}".format(stage, kind)
                net_params[offset_name].initialize(init=mx.init.Zero())

        # Load ImageNet-pretrained backbone weights for every matching name.
        ctx = mx.cpu()
        if pretrained_path is None:
            pretrained_path = self.DEFAULT_PRETRAINED_PATH
        args, auxes = load_param(pretrained_path)
        for source in (args, auxes):
            for param_name in source.keys():
                if param_name in net_params:
                    net_params[param_name]._load_init(source[param_name], ctx=ctx)

        # Any parameter still uninitialized (e.g. the new detection heads)
        # gets a normal random init.
        for param in net_params.values():
            if param._data is not None:
                continue
            param.initialize(init=mx.init.Normal())

        # Optionally resume from a previous checkpoint.
        if resume is not None and os.path.exists(str(resume)):
            saved = mx.nd.load(resume)
            for param_name in saved.keys():
                if param_name in net_params:
                    net_params[param_name]._load_init(saved[param_name], ctx=mx.cpu())
            logging.info("Resumed parameters from {}".format(str(resume)))

    def hybrid_forward(self, F, *args):
        """Dispatch to the training or the testing graph depending on
        whether autograd is recording (i.e. ``ag.is_training()``)."""
        if ag.is_training():
            print("Train model")
            return self.features(*args)
        else:
            print("Test model")
            return self.features_test(*args)
def train_net(ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, lr, lr_step):
    """Train the R-FCN-DCN detector end-to-end on VOC 07+12 trainval and
    evaluate VOC07 mAP@0.5 on the 2007 test split after every epoch.

    Parameters
    ----------
    ctx : list of mx.Context
        One context per device; the effective batch size equals ``len(ctx)``.
    pretrained, epoch, prefix :
        NOTE(review): unused inside this function — paths and epochs are
        taken from the global ``config`` instead; kept for interface
        compatibility.
    begin_epoch, end_epoch : int
        Epoch range.  NOTE(review): the training loop below uses
        ``config.TRAIN.end_epoch``, not the ``end_epoch`` argument — confirm
        whether that is intended.
    lr : float
        Base learning rate before decay adjustment.
    lr_step : str
        Comma-separated epoch numbers at which the learning rate decays.
    """
    # Fixed seeds so anchor sampling and weight init are reproducible.
    mx.random.seed(3)
    np.random.seed(3)

    # One image per device.
    batch_size = len(ctx)
    sym_instance =resnet_v1_101_rfcn_dcn()
    sym = sym_instance.get_symbol(config, is_train=True)
    sym_test = sym_instance.get_symbol(config, is_train=False)
    # RPN score map symbol; AnchorLoader uses it to infer the anchor grid shape.
    feat_sym = sym.get_internals()['rpn_cls_score_output']
    net = RFCN_Resnet(sym,sym_test,config,resume=config.TRAIN.RESUME)
    # NOTE(review): dataset root is hard-coded to a local path.
    train_dataset = VOCDetection(root="/media/kohill/data/kohill/VOCdevkit", splits=((2007, 'trainval'),(2012, 'trainval')))
    val_dataset = VOCDetection(root="/media/kohill/data/kohill/VOCdevkit", splits=((2007, 'test'),))

    # Serialize the roidb to disk, then reload it (the cache file doubles as
    # the handoff format).  NOTE(review): open() handle is never closed.
    train_dataset.to_roidb("cache/voc_train.roidb")
    train_roidb = pickle.load(open("cache/voc_train.roidb","rb"))
    logging.info("length of roidb: {}".format(len(train_roidb)))
    # Horizontal-flip augmentation doubles the roidb when enabled.
    if config.TRAIN.FLIP: train_roidb = append_flipped_images(train_roidb)
    logging.info("length of roidb: {}".format(len(train_roidb)))
    train_data = AnchorLoader(feat_sym, train_roidb, config, batch_size=len(ctx), shuffle=config.TRAIN.SHUFFLE, ctx=ctx,
                                  feat_stride=config.network.RPN_FEAT_STRIDE, anchor_scales=config.network.ANCHOR_SCALES,
                                  anchor_ratios=config.network.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING)
    net.collect_params().reset_ctx(list(set(ctx)))
    # Training metrics: RPN and RCNN accuracy / log-loss / smooth-L1 loss.
    rpn_eval_metric = metric.RPNAccMetric()
    rpn_cls_metric = metric.RPNLogLossMetric()
    rpn_bbox_metric = metric.RPNL1LossMetric()
    eval_metric = metric.RCNNAccMetric(config)
    cls_metric = metric.RCNNLogLossMetric(config)
    bbox_metric = metric.RCNNL1LossMetric(config)
    eval_metrics = mx.metric.CompositeEvalMetric()
    for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric]:
        eval_metrics.add(child_metric)

    # Freeze parameters whose names start with a configured prefix
    # (typically the early backbone stages) by nulling their grad_req.
    params_all = net.collect_params()
    params_to_train = {}
    params_fixed_prefix = config.network.FIXED_PARAMS
    for p in params_all.keys():
        ignore = False
        for f in params_fixed_prefix:
            if str(p).startswith(f):
                ignore = True
                params_all[p].grad_req = 'null'
                logging.info("{} is ignored when training.".format(p))
        if not ignore: params_to_train[p] = params_all[p]
    base_lr = lr
    lr_factor = config.TRAIN.lr_factor
    lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
    # Keep only the decay steps still ahead of begin_epoch...
    lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
    # ...and fold the decays already passed into the starting lr.
    lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
    # Convert decay epochs into iteration counts for the scheduler.
    lr_iters = [int(epoch * len(train_roidb) / batch_size) for epoch in lr_epoch_diff]
    print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
    lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, config.TRAIN.warmup, config.TRAIN.warmup_lr, config.TRAIN.warmup_step)

    # NOTE(review): the recomputed `lr` above is not passed to the Trainer;
    # `config.TRAIN.lr` is used instead — confirm this is intended.
    trainer = mx.gluon.Trainer(
        net.collect_params(),  # fix batchnorm, fix first stage, etc...
        'sgd',
        {'learning_rate': config.TRAIN.lr,
         'wd': config.TRAIN.wd,
         'momentum': config.TRAIN.momentum,
         'clip_gradient': None,
         'lr_scheduler': lr_scheduler
         })
    val_metric_5 = VOC07MApMetric(iou_thresh=.5)

    # NOTE(review): net_parallel is created but the parallel path below is
    # commented out, so this object is currently unused.
    net_parallel = DataParallelModel(net,ctx_list=ctx)

    for epoch in range(begin_epoch, config.TRAIN.end_epoch):
        train_data.reset()
        net.hybridize(static_alloc=True, static_shape=False)
        for nbatch, data_batch in enumerate(tqdm.tqdm(train_data, total = len(train_roidb) // batch_size)):
            # Scatter each device's data + label tensors to its own context.
            inputs = [[x.as_in_context(c) for x in d + l] for c,d,l in zip(ctx, data_batch.data, data_batch.label)]
            with ag.record():
                outputs = [net(*x) for x in inputs]
                # Flatten per-device output lists and backprop through all.
                ag.backward(sum(outputs, []))
                # outputs = net_parallel(*inputs)
                # ag.backward(sum(outputs, ()))
            # Metrics are updated from the first device's batch only.
            eval_metrics.update(data_batch.label[0],outputs[0])
            # NOTE(review): step(1) does not rescale by batch size —
            # presumably losses are normalized inside the symbols; confirm.
            trainer.step(1)
            if nbatch % 100 == 0:
                # Log all composite-metric values plus the current lr,
                # then reset so each log line covers ~100 batches.
                msg = ','.join(['{}={:.3f}'.format(w,v) for w,v in zip(*eval_metrics.get())])
                msg += ",lr={}".format(trainer.learning_rate)
                logging.info(msg)
                eval_metrics.reset()

        # --- per-epoch validation: multi-scale detection over VOC07 test ---
        val_metric_5.reset()
        net.hybridize(static_alloc=True, static_shape=False)
        for i in tqdm.tqdm(range(len(val_dataset))):
            img_path, gt_boxes = val_dataset.at_with_image_path(i)
            pred_bboxes, pred_scores, pred_clsid = im_detect_bbox_aug(net,nms_threshold=config.TEST.NMS,
                                                                      im=cv2.imread(img_path), # bgr
                                                                      scales=config.SCALES,
                                                                      pixel_means = config.network.PIXEL_MEANS,
                                                                      ctx = ctx,
                                                                      bbox_stds = config.TRAIN.BBOX_STDS,
                                                                      flip=False,
                                                                      threshold=1e-3,
                                                                      viz=False
                                                                      )
            # The metric expects batched arrays, hence np.newaxis.
            # pred_clsid-1: presumably maps network class ids (0 = background)
            # onto the dataset's 0-based foreground labels — confirm.
            val_metric_5.update(pred_bboxes = pred_bboxes[np.newaxis],
                                pred_labels = pred_clsid[np.newaxis]-1,
                                pred_scores = pred_scores[np.newaxis],
                                gt_bboxes = gt_boxes[np.newaxis,:,:4],
                                gt_labels = gt_boxes[np.newaxis,:,4],
                                gt_difficults=gt_boxes[np.newaxis,:,5])
        re = val_metric_5.get()
        logging.info(re)
        # Checkpoint name embeds the epoch number and the mAP value.
        save_path = "{}-{}-{}.params".format(config.TRAIN.model_prefix,epoch,re[1])
        net.collect_params().save(save_path)
        logging.info("Saved checkpoint to {}.".format(save_path))
def main(config_path="configs/voc/resnet_v1_101_voc0712_rfcn_dcn_end2end_ohem_one_gpu.yaml"):
    """Entry point: load the experiment config, set up logging, select GPUs
    from ``config.gpus``, and launch training.

    Parameters
    ----------
    config_path : str
        YAML experiment configuration to load.  The default keeps the
        original hard-coded behavior, so ``main()`` with no arguments is
        unchanged.
    """
    update_config(config_path)
    log_init(filename=config.TRAIN.model_prefix + "train.log")
    logging.info(pprint.pformat(config))
    # Disable cuDNN autotune: detection inputs vary in size per batch, so
    # autotuning would re-benchmark kernels constantly.
    os.environ["MXNET_CUDNN_AUTOTUNE_DEFAULT"] = "0"
    # One mx.gpu context per comma-separated device id in the config.
    ctx = [mx.gpu(int(i)) for i in config.gpus.split(',')]
    train_net(ctx, config.network.pretrained, config.network.pretrained_epoch,
              config.TRAIN.model_prefix, config.TRAIN.begin_epoch,
              config.TRAIN.end_epoch, config.TRAIN.lr, config.TRAIN.lr_step)

if __name__ == '__main__':
    # Run training only when executed as a script, not on import.
    main()
