"""Train Faster-RCNN end to end."""
import argparse
import os
# disable autotune
os.environ['MXNET_CUDNN_AUTOTUNE_DEFAULT'] = '0'
import logging
import time
import numpy as np
import mxnet as mx
from mxnet import nd
from mxnet import gluon
from mxnet import autograd
import gluoncv as gcv
from gluoncv import data as gdata
from gluoncv import utils as gutils
from gluoncv.model_zoo import get_model
from gluoncv.data import batchify
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultTrainTransform
from gluoncv.data.transforms.presets.rcnn import FasterRCNNDefaultValTransform
from gluoncv.utils.metrics.voc_detection import VOC07MApMetric
from gluoncv.utils.metrics.coco_detection import COCODetectionMetric
from gluoncv.utils.metrics.accuracy import Accuracy
from mxnet import ndarray


def parse_args():
    parser = argparse.ArgumentParser(description='Train Cascade R-CNN networks end to end.')
    parser.add_argument('--network', type=str, default='resnet50_v2a',
                        help="Base network name which serves as feature extraction base.")
    parser.add_argument('--dataset', type=str, default='voc',
                        help='Training dataset. Now supports voc and coco.')
    parser.add_argument('--short', type=str, default='',
                        help='Resize image to the given short side, default to 600 for voc.')
    parser.add_argument('--max-size', type=str, default='',
                        help='Max size of either side of image, default to 1000 for voc.')
    parser.add_argument('--num-workers', '-j', dest='num_workers', type=int,
                        default=4, help='Number of data workers; you can use a larger '
                        'number to accelerate data loading if your CPU and GPUs are powerful.')
    parser.add_argument('--gpus', type=str, default='0',
                        help='Training with GPUs, you can specify 1,3 for example.')
    parser.add_argument('--epochs', type=str, default='',
                        help='Training epochs.')
    parser.add_argument('--resume', type=str, default='',
                        help='Resume from previously saved parameters if not None. '
                        'For example, you can resume from ./faster_rcnn_xxx_0123.params')
    parser.add_argument('--start-epoch', type=int, default=0,
                        help='Starting epoch for resuming, default is 0 for new training. '
                        'You can set it to 100, for example, to resume from epoch 100.')
    parser.add_argument('--lr', type=str, default='',
                        help='Learning rate, default is 0.001 for voc single gpu training.')
    parser.add_argument('--lr-decay', type=float, default=0.1,
                        help='decay rate of learning rate. default is 0.1.')
    parser.add_argument('--lr-decay-epoch', type=str, default='',
                        help='Epochs at which learning rate decays. default is 14,20 for voc.')
    parser.add_argument('--lr-warmup', type=str, default='',
                        help='warmup iterations to adjust learning rate, default is 0 for voc.')
    parser.add_argument('--momentum', type=float, default=0.9,
                        help='SGD momentum, default is 0.9')
    parser.add_argument('--wd', type=str, default='',
                        help='Weight decay, default is 5e-4 for voc')
    parser.add_argument('--log-interval', type=int, default=100,
                        help='Logging mini-batch interval. Default is 100.')
    parser.add_argument('--save-prefix', type=str, default='',
                        help='Saving parameter prefix')
    parser.add_argument('--save-interval', type=int, default=1,
                        help='Saving parameters epoch interval, best model will always be saved.')
    parser.add_argument('--val-interval', type=int, default=1,
                        help='Epoch interval for validation, increase the number will reduce the '
                             'training time if validation is slow.')
    parser.add_argument('--seed', type=int, default=233,
                        help='Random seed to be fixed.')
    parser.add_argument('--verbose', dest='verbose', action='store_true',
                        help='Print helpful debugging info once set.')
    args = parser.parse_args()
    if args.dataset == 'voc':
        args.short = int(args.short) if args.short else 600
        args.max_size = int(args.max_size) if args.max_size else 1000
        args.epochs = int(args.epochs) if args.epochs else 20
        args.lr_decay_epoch = args.lr_decay_epoch if args.lr_decay_epoch else '14,20'
        args.lr = float(args.lr) if args.lr else 0.001
        args.lr_warmup = int(args.lr_warmup) if args.lr_warmup else -1
        args.wd = float(args.wd) if args.wd else 5e-4
    elif args.dataset == 'coco':
        args.short = int(args.short) if args.short else 800
        args.max_size = int(args.max_size) if args.max_size else 1333
        args.epochs = int(args.epochs) if args.epochs else 24
        args.lr_decay_epoch = args.lr_decay_epoch if args.lr_decay_epoch else '16,21'
        args.lr = float(args.lr) if args.lr else 0.00125
        args.lr_warmup = int(args.lr_warmup) if args.lr_warmup else 8000
        args.wd = float(args.wd) if args.wd else 1e-4
        num_gpus = len(args.gpus.split(','))
        if num_gpus == 1:
            args.lr_warmup = -1
        else:
            args.lr *= num_gpus
            args.lr_warmup /= num_gpus
    return args
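
# Example invocation (the script filename here is assumed, adjust to yours):
#   python train_cascade_rcnn.py --gpus 0,1 --network resnet50_v2a --dataset voc
# Flags left at their empty string defaults (--lr, --epochs, --short, ...)
# pick up the per-dataset values filled in by parse_args above.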


class RPNAccMetric(mx.metric.EvalMetric):
    def __init__(self):
        super(RPNAccMetric, self).__init__('RPNAcc')

    def update(self, labels, preds):
        # label: [rpn_label, rpn_weight]
        # preds: [rpn_cls_logits]
        rpn_label, rpn_weight = labels
        rpn_cls_logits = preds[0]

        # calculate num_inst (average on those fg anchors)
        num_inst = mx.nd.sum(rpn_weight)

        # cls_logits (b, c, h, w) red_label (b, 1, h, w)
        # pred_label = mx.nd.argmax(rpn_cls_logits, axis=1, keepdims=True)
        pred_label = mx.nd.sigmoid(rpn_cls_logits) >= 0.5
        # label (b, 1, h, w)
        num_acc = mx.nd.sum((pred_label == rpn_label) * rpn_weight)

        self.sum_metric += num_acc.asscalar()
        self.num_inst += num_inst.asscalar()
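
# A quick sanity check for RPNAccMetric (illustrative only): two anchors,
# both with weight 1, and logits on the correct side of 0 give accuracy 1.
#   metric = RPNAccMetric()
#   metric.update([mx.nd.array([1, 0]), mx.nd.array([1, 1])],
#                 [mx.nd.array([2.0, -2.0])])
#   print(metric.get())  # ('RPNAcc', 1.0)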


class RPNL1LossMetric(mx.metric.EvalMetric):
    def __init__(self):
        super(RPNL1LossMetric, self).__init__('RPNL1Loss')

    def update(self, labels, preds):
        # label = [rpn_bbox_target, rpn_bbox_weight]
        # pred = [rpn_bbox_reg]
        rpn_bbox_target, rpn_bbox_weight = labels
        rpn_bbox_reg = preds[0]

        # calculate num_inst (average on those fg anchors)
        num_inst = mx.nd.sum(rpn_bbox_weight) / 4

        # calculate smooth_l1
        loss = mx.nd.sum(rpn_bbox_weight * mx.nd.smooth_l1(rpn_bbox_reg - rpn_bbox_target, scalar=3))

        self.sum_metric += loss.asscalar()
        self.num_inst += num_inst.asscalar()
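
# mx.nd.smooth_l1(x, scalar=sigma) used above computes, elementwise,
#   0.5 * (sigma * x)^2   if |x| < 1 / sigma^2
#   |x| - 0.5 / sigma^2   otherwise,
# so scalar=3 (RPN) narrows the quadratic zone to |x| < 1/9 while scalar=1
# (RCNN) keeps the standard smooth-L1. This mirrors the HuberLoss(rho=1/9.)
# construction in the training losses further down.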


class RCNNAccMetric(mx.metric.EvalMetric):
    def __init__(self, metric_name='RCNNAcc'):
        super(RCNNAccMetric, self).__init__(metric_name)

    def update(self, labels, preds):
        # label = [rcnn_label]
        # pred = [rcnn_cls]
        rcnn_label = labels[0]
        rcnn_cls = preds[0]

        # calculate num_acc
        pred_label = mx.nd.argmax(rcnn_cls, axis=1)
        num_acc = mx.nd.sum(pred_label == rcnn_label)

        self.sum_metric += num_acc.asscalar()
        self.num_inst += rcnn_label.size


class RCNNL1LossMetric(mx.metric.EvalMetric):
    def __init__(self, metric_name='RCNNL1Loss'):
        super(RCNNL1LossMetric, self).__init__(metric_name)

    def update(self, labels, preds):
        # label = [rcnn_bbox_target, rcnn_bbox_weight]
        # pred = [rcnn_reg]
        rcnn_bbox_target, rcnn_bbox_weight = labels
        rcnn_bbox_reg = preds[0]

        # calculate num_inst
        num_inst = mx.nd.sum(rcnn_bbox_weight) / 4
        # calculate smooth_l1
        loss = mx.nd.sum(rcnn_bbox_weight * mx.nd.smooth_l1(rcnn_bbox_reg - rcnn_bbox_target, scalar=1))

        self.sum_metric += loss.asscalar()
        self.num_inst += num_inst.asscalar()

def get_dataset(dataset, args):
    if dataset.lower() == 'voc':
        train_dataset = gdata.VOCDetection(
            splits=[(2007, 'trainval'), (2012, 'trainval')])
        val_dataset = gdata.VOCDetection(
            splits=[(2007, 'test')])
        val_metric = VOC07MApMetric(iou_thresh=0.5, class_names=val_dataset.classes)
    elif dataset.lower() == 'coco':
        train_dataset = gdata.COCODetection(splits='instances_train2017')
        val_dataset = gdata.COCODetection(splits='instances_val2017', skip_empty=False)
        val_metric = COCODetectionMetric(val_dataset, args.save_prefix + '_eval', cleanup=True)
    else:
        raise NotImplementedError('Dataset: {} not implemented.'.format(dataset))
    return train_dataset, val_dataset, val_metric

def get_dataloader(net, train_dataset, val_dataset, short, max_size, batch_size, num_workers):
    """Get dataloader."""
    train_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])
    train_loader = mx.gluon.data.DataLoader(
        train_dataset.transform(FasterRCNNDefaultTrainTransform(short, max_size, net)),
        batch_size, True, batchify_fn=train_bfn, last_batch='rollover', num_workers=num_workers)
    val_bfn = batchify.Tuple(*[batchify.Append() for _ in range(3)])
    val_loader = mx.gluon.data.DataLoader(
        val_dataset.transform(FasterRCNNDefaultValTransform(short, max_size)),
        batch_size, False, batchify_fn=val_bfn, last_batch='keep', num_workers=num_workers)
    return train_loader, val_loader
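
# Note on the batchify functions above: batchify.Tuple applies one batchify
# function per data field (image, label, im_info here), and batchify.Append
# keeps every sample as its own NDArray rather than stacking, since images
# in a batch can have different shapes after the short-side/max-size resize.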

def save_params(net, logger, best_map, current_map, epoch, save_interval, prefix):
    current_map = float(current_map)
    if current_map > best_map[0]:
        logger.info('[Epoch {}] mAP {} higher than current best {}, saving to {}'.format(
                    epoch, current_map, best_map[0], '{:s}_best.params'.format(prefix)))
        best_map[0] = current_map
        net.save_parameters('{:s}_best.params'.format(prefix))
        with open(prefix+'_best_map.log', 'a') as f:
            f.write('\n{:04d}:\t{:.4f}'.format(epoch, current_map))
    if save_interval and (epoch + 1) % save_interval == 0:
        logger.info('[Epoch {}] Saving parameters to {}'.format(
            epoch, '{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map)))
        net.save_parameters('{:s}_{:04d}_{:.4f}.params'.format(prefix, epoch, current_map))

def split_and_load(batch, ctx_list):
    """Split data into one chunk per device and load each chunk onto its device."""
    new_batch = []
    for data in batch:
        if not isinstance(data, ndarray.NDArray):
            data = ndarray.array(data, ctx=ctx_list[0])
        if len(ctx_list) == 1:
            # single device: no split needed, but keep the nested-list layout
            new_batch.append([data.as_in_context(ctx_list[0])])
            continue
        slices = gluon.utils.split_data(data, len(ctx_list), 0, True)
        new_batch.append([x.as_in_context(ctx) for x, ctx in zip(slices, ctx_list)])
    return new_batch
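
# Illustrative usage of split_and_load (shapes are made up): with
#   ctx_list = [mx.gpu(0), mx.gpu(1)]
#   batch = [mx.nd.zeros((2, 3, 600, 800)), mx.nd.zeros((2, 5, 6))]
# split_and_load(batch, ctx_list) returns one list per field, each holding
# a 1-sample slice on gpu(0) and a 1-sample slice on gpu(1).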

def validate(net, val_data, ctx, eval_metric):
    """Test on validation dataset."""
    eval_metric.reset()
    # set nms threshold and topk constraint
    net.set_nms(nms_thresh=0.3, nms_topk=400)
    net.hybridize(static_alloc=True)
    for batch in val_data:
        batch = [gluon.utils.split_and_load(mx.nd.concatenate(batch[it]), ctx_list=ctx, batch_axis=0)
                 for it in range(3)]
        det_bboxes = []
        det_ids = []
        det_scores = []
        gt_bboxes = []
        gt_ids = []
        gt_difficults = []
        for x_, y_, im_scale_ in zip(*batch):
            for ix in range(x_.shape[0]):
                x = x_[ix:ix + 1]
                y = y_[ix:ix + 1]
                im_scale = im_scale_[ix:ix + 1]
                # drop the -1 padding rows appended during batchification
                idx = np.where(y[0, :, 0].asnumpy() > -1)[0][-1]
                y = y[:, :idx + 1, :]
                # get prediction results
                ids, scores, bboxes = net(x)
                det_ids.append(ids.expand_dims(0))
                det_scores.append(scores.expand_dims(0))
                # clip to image size
                det_bboxes.append(mx.nd.Custom(bboxes, x, op_type='bbox_clip_to_image').expand_dims(0))
                # rescale to original resolution
                im_scale = im_scale.reshape((-1)).asscalar()
                det_bboxes[-1] *= im_scale
                # split ground truths
                gt_ids.append(y.slice_axis(axis=-1, begin=4, end=5))
                gt_bboxes.append(y.slice_axis(axis=-1, begin=0, end=4))
                gt_bboxes[-1] *= im_scale
                gt_difficults.append(y.slice_axis(axis=-1, begin=5, end=6) if y.shape[-1] > 5 else None)

        # update metric
        for det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff in zip(det_bboxes, det_ids, det_scores, gt_bboxes, gt_ids, gt_difficults):
            eval_metric.update(det_bbox, det_id, det_score, gt_bbox, gt_id, gt_diff)
    return eval_metric.get()
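
# Note: validate() feeds images one at a time (1-sample slices) and scales
# both detected and ground-truth boxes by the same im_scale before the
# metric consumes them, so the comparison stays resolution-consistent.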
 

def get_rcnn_cls_loss(cls_pred, cls_targets):
    num_rcnn_pos = (cls_targets >= 0).sum()
    rcnn_softmax_ce_loss = mx.gluon.loss.SoftmaxCrossEntropyLoss()
    rcnn_cls_loss = rcnn_softmax_ce_loss(cls_pred, cls_targets, cls_targets >= 0) \
        * cls_targets.size / cls_targets.shape[0] / num_rcnn_pos
    return rcnn_cls_loss

def get_rcnn_box_loss(box_pred, box_targets, box_masks, cls_targets):
    num_rcnn_pos = (cls_targets >= 0).sum()
    rcnn_huber_loss = mx.gluon.loss.HuberLoss()
    rcnn_box_loss = rcnn_huber_loss(box_pred, box_targets, box_masks) \
        * box_pred.size / box_pred.shape[0] / num_rcnn_pos
    return rcnn_box_loss

def get_rcnn_cls_box_loss(cls_pred, cls_targets, box_pred, box_targets, box_masks):
    rcnn_cls_loss = get_rcnn_cls_loss(cls_pred, cls_targets)
    rcnn_box_loss = get_rcnn_box_loss(box_pred, box_targets, box_masks, cls_targets)
    return rcnn_cls_loss, rcnn_box_loss
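
# Normalization sketch for the helpers above: gluon losses return a
# per-sample mean, so multiplying by `targets.size / targets.shape[0]`
# recovers a per-sample sum, and dividing by num_rcnn_pos (the count of
# non-ignored rows, cls_targets >= 0) averages over the sampled proposals.
# E.g. with 128 sampled RoIs, all kept, the per-RoI losses are summed and
# divided by 128; the 128 is illustrative, not the sampler's actual setting.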


def get_rpn_loss(rpn_prediction, rpn_cls_targets, rpn_box_targets, rpn_box_masks):
    rpn_cls_loss = mx.gluon.loss.SigmoidBinaryCrossEntropyLoss(from_sigmoid=False)
    rpn_box_loss = mx.gluon.loss.HuberLoss(rho=1. / 9.)  # == smooth-L1
    rpn_score, rpn_box, anchors = rpn_prediction
    rpn_score = rpn_score.squeeze(axis=-1)
    num_rpn_pos = (rpn_cls_targets >= 0).sum()
    rpn_loss1 = rpn_cls_loss(rpn_score, rpn_cls_targets, rpn_cls_targets >= 0) \
        * rpn_cls_targets.size / num_rpn_pos
    rpn_loss2 = rpn_box_loss(rpn_box, rpn_box_targets, rpn_box_masks) \
        * rpn_box.size / num_rpn_pos
    return rpn_loss1, rpn_loss2, rpn_score, rpn_box

def get_lr_at_iter(alpha):
    return 1. / 3. * (1 - alpha) + alpha
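
# get_lr_at_iter(alpha) = 1/3 + (2/3) * alpha, i.e. a linear ramp from one
# third of the base lr (alpha=0) to the full base lr (alpha=1). The loop
# below advances alpha in 500-iteration chunks; e.g. with lr_warmup=8000,
# iteration 0 gives lr/3, iteration 4000 roughly 2/3 * lr, and iteration
# 8000 the full lr.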

def train(net, train_data, val_data, eval_metric, args):
    """Training pipeline"""


    net.collect_params().reset_ctx(ctx)
    #net.collect_train_params().setattr('grad_req','add')
    trainer = gluon.Trainer(
        net.collect_train_params(),  # fix batchnorm, fix first stage, etc...
        'sgd',
        {'learning_rate': args.lr,
         'wd': args.wd,
         'momentum': args.momentum,
         'clip_gradient': 5})

    # lr decay policy
    lr_decay = float(args.lr_decay)
    lr_steps = sorted([float(ls) for ls in args.lr_decay_epoch.split(',') if ls.strip()])
    lr_warmup = int(args.lr_warmup)

    # the RPN/RCNN losses themselves are constructed inside the helper
    # functions above (get_rpn_loss, get_rcnn_cls_box_loss)
    metrics = [mx.metric.Loss('RPN_Conf'),
               mx.metric.Loss('RPN_SmoothL1'),
               mx.metric.Loss('RCNN_CrossEntropy'),
               mx.metric.Loss('RCNN_SmoothL1'),
               mx.metric.Loss('2nd_CrossEntropy'),
               mx.metric.Loss('2nd_SmoothL1'),
               mx.metric.Loss('3rd_CrossEntropy'),
               mx.metric.Loss('3rd_SmoothL1')]

    rpn_acc_metric = RPNAccMetric()
    rpn_bbox_metric = RPNL1LossMetric()
    rcnn_acc_metric = RCNNAccMetric()
    rcnn_bbox_metric = RCNNL1LossMetric()
    cascade_2nd_acc_metric = RCNNAccMetric('Acc_2nd')
    cascade_2nd_bbox_metric = RCNNL1LossMetric('L1loss_2nd')
    cascade_3rd_acc_metric = RCNNAccMetric('Acc_3rd')
    cascade_3rd_bbox_metric = RCNNL1LossMetric('L1loss_3rd')
    metrics2 = [rpn_acc_metric, rpn_bbox_metric,
                rcnn_acc_metric, rcnn_bbox_metric,
                cascade_2nd_acc_metric, cascade_2nd_bbox_metric,
                cascade_3rd_acc_metric, cascade_3rd_bbox_metric]

    # set up logger
    logging.basicConfig()
    logger = logging.getLogger()
    logger.setLevel(logging.INFO)
    log_file_path = args.save_prefix + '_train.log'
    log_dir = os.path.dirname(log_file_path)
    if log_dir and not os.path.exists(log_dir):
        os.makedirs(log_dir)
    fh = logging.FileHandler(log_file_path)
    logger.addHandler(fh)
    logger.info(args)
    if args.verbose:
        logger.info('Trainable parameters:')
        logger.info(net.collect_train_params().keys())
    logger.info('Start training from [Epoch {}]'.format(args.start_epoch))
    best_map = [0]
    for epoch in range(args.start_epoch, args.epochs):
        while lr_steps and epoch >= lr_steps[0]:
            new_lr = trainer.learning_rate * lr_decay
            lr_steps.pop(0)
            trainer.set_learning_rate(new_lr)
            logger.info("[Epoch {}] Set learning rate to {}".format(epoch, new_lr))
        for metric in metrics:
            metric.reset()
        tic = time.time()
        btic = time.time()
        net.hybridize(static_alloc=True)
        if args.verbose:
            print(net)
            print(net.collect_params())
        base_lr = trainer.learning_rate
        # train start
        print('training start -----------------------')
        for i, batch in enumerate(train_data):
            if epoch == 0 and i <= lr_warmup:
                new_lr = base_lr * get_lr_at_iter((i // 500) / (lr_warmup / 500.))
                if new_lr != trainer.learning_rate:
                    logger.info('[Epoch 0 Iteration {}] Set learning rate to {}'.format(i, new_lr))
                    trainer.set_learning_rate(new_lr)
            batch_size = len(batch[0])
            batch = [gluon.utils.split_and_load(mx.nd.concatenate(batch[it]), ctx_list=ctx, batch_axis=0)
                     for it in range(3)]
            losses = []
            metric_losses = [[] for _ in metrics]
            add_losses = [[] for _ in metrics2]
            with autograd.record():
                for data_, label_, im_info_ in zip(*batch):
                    for ix in range(data_.shape[0]):
                        data = data_[ix:ix + 1]
                        label = label_[ix:ix + 1]
                        im_info = im_info_[ix:ix + 1]
                        # drop the -1 padding rows appended during batchification
                        idx = np.where(label[0, :, 0].asnumpy() > -1)[0][-1]
                        label = label[:, :idx + 1, :]
                        gt_label = label[:, :, 4:5]
                        gt_box = label[:, :, :4]
                        # crop the padded batch image back to this sample's true size
                        data = data[:, :, :int(im_info[0, 1].asnumpy()[0]), :int(im_info[0, 0].asnumpy()[0])]

                        rpn_pred, cascade_rcnn_pred = net(data, gt_box)
                        rpn_score, rpn_box, anchors = rpn_pred
                        rpn_cls_targets, rpn_box_targets, rpn_box_masks = net.rpn_target_generator(
                            gt_box, anchors[0], im_info[0, 0], im_info[0, 1])
                        # losses of rpn
                        rpn_loss1, rpn_loss2, rpn_score, rpn_box = get_rpn_loss(
                            rpn_pred, rpn_cls_targets, rpn_box_targets, rpn_box_masks)
                        rpn_loss = rpn_loss1 + rpn_loss2
                        # Q collects per-stage loss values for the Loss metrics;
                        # addQ collects ([labels], [preds]) pairs for metrics2
                        Q = []
                        addQ = []
                        Q.append(rpn_loss1)
                        Q.append(rpn_loss2)
                        addQ.append([[rpn_cls_targets, rpn_cls_targets >= 0], [rpn_score]])
                        addQ.append([[rpn_box_targets, rpn_box_masks], [rpn_box]])
     

                        # generate targets for rcnn
                        cascade_rcnn_loss = 0
                        for stage, cascade_rcnn_stage in enumerate(cascade_rcnn_pred):
                            cls_pred, box_pred, *sample_data = cascade_rcnn_stage
                            roi, samples, matches = sample_data
                            if stage == 0:
                                cls_targets, box_targets, box_masks = net.target_generator(
                                    roi, samples, matches, gt_label, gt_box)
                            elif stage == 1:
                                cls_targets, box_targets, box_masks = net.target_generator_2nd(
                                    roi, samples, matches, gt_label, gt_box)
                            elif stage == 2:
                                cls_targets, box_targets, box_masks = net.target_generator_3rd(
                                    roi, samples, matches, gt_label, gt_box)
                            rcnn_cls_loss, rcnn_box_loss = get_rcnn_cls_box_loss(
                                cls_pred, cls_targets, box_pred, box_targets, box_masks)
                            rcnn_loss = rcnn_cls_loss + rcnn_box_loss

                            Q.append(rcnn_cls_loss)
                            Q.append(rcnn_box_loss)
                            addQ.append([[cls_targets], [cls_pred]])
                            addQ.append([[box_targets, box_masks], [box_pred]])

                            # later stages contribute with geometrically decreasing weight
                            weight = 1.0 / (2 ** stage)
                            cascade_rcnn_loss = cascade_rcnn_loss + weight * rcnn_loss.sum()

     
                        losses.append(cascade_rcnn_loss + rpn_loss.sum())
                        for loss_idx, loss in enumerate(Q):
                            metric_losses[loss_idx].append(loss.sum())

                        for loss_idx, loss in enumerate(addQ):
                            add_losses[loss_idx].append(loss)
     
                autograd.backward(losses)
                for metric, record in zip(metrics, metric_losses):
                    metric.update(0, record)

                for metric, records in zip(metrics2, add_losses):
                    for pred in records:
                        metric.update(pred[0], pred[1])
            trainer.step(batch_size)
            # update metrics
            if args.log_interval and not (i + 1) % args.log_interval:
                msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics + metrics2])
                logger.info('[Epoch {}][Batch {}], Speed: {:.3f} samples/sec, {}'.format(
                    epoch, i, batch_size/(time.time()-btic), msg))
            btic = time.time()
        #train loop end
        print('\n')

        msg = ','.join(['{}={:.3f}'.format(*metric.get()) for metric in metrics])
        logger.info('[Epoch {}] Training cost: {:.3f}, {}'.format(epoch, (time.time()-tic), msg))
        print('training ended -----------------------\n\n\n')


        print('start validation--------')
        if not (epoch + 1) % args.val_interval:
            # consider reduce the frequency of validation to save time
            map_name, mean_ap = validate(net, val_data, ctx, eval_metric)
            val_msg = '\n'.join(['{}={}'.format(k, v) for k, v in zip(map_name, mean_ap)])
            logger.info('[Epoch {}] Validation: \n{}'.format(epoch, val_msg))
            current_map = float(mean_ap[-1])
        else:
            current_map = 0.
        print('validation ended--------')
        save_params(net, logger, best_map, current_map, epoch, args.save_interval, args.save_prefix)

 
if __name__ == '__main__':
    args = parse_args()

    # fix seed for mxnet, numpy and python builtin random generator.
    gutils.random.seed(args.seed)
 
    # training contexts
    ctx = [mx.gpu(int(i)) for i in args.gpus.split(',') if i.strip()]
    ctx = ctx if ctx else [mx.cpu()]
    args.batch_size = len(ctx) * 4  # 4 samples per device

    # network
    net_name = '_'.join(('cascade_rcnn', args.network, args.dataset))
    args.save_prefix += net_name
    print(net_name)
    net = get_model(net_name, pretrained_base=True)
    if args.resume.strip():
        net.load_parameters(args.resume.strip())
    else:
        for param in net.collect_params().values():
            if param._data is not None:
                continue
            param.initialize()

    # training data
    train_dataset, val_dataset, eval_metric = get_dataset(args.dataset, args)
    train_data, val_data = get_dataloader(
        net, train_dataset, val_dataset, args.short, args.max_size, args.batch_size, args.num_workers)

    # training
    train(net, train_data, val_data, eval_metric, args)
 
