# --------------------------------------------------------
# Deformable Convolutional Networks
# Copyright (c) 2017 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Modified by Haozhi Qi
# --------------------------------------------------------
# Based on:
# MX-RCNN
# Copyright (c) 2016 by Contributors
# Licensed under The Apache 2.0 License
# https://github.com/ijkguo/mx-rcnn/
# --------------------------------------------------------


import argparse
import os
import pprint

import sys
from config import config, update_config
import shutil
import numpy as np
import mxnet as mx

import logging,pickle
logging.basicConfig(level=logging.DEBUG)
from .fpnroidbdataiter import PyramidAnchorIterator
from .resnet_v1_101_fpn_dcn_rcnn import resnet_v1_101_fpn_dcn_rcnn
class Net(mx.gluon.nn.Block):
    """Thin Gluon wrapper that runs a pre-built training symbol graph.

    Binds *sym* into a ``SymbolBlock`` over the fixed set of input
    variables the FPN training graph expects.
    """

    # Fixed input order the training symbol expects.
    _INPUT_NAMES = ("data", "gt_boxes", "im_info", "label",
                    "bbox_target", "bbox_weight")

    def __init__(self, sym, params=None):
        """
        Parameters
        ----------
        sym : Symbol or list of Symbol
            Output symbol(s) of the graph to wrap.
        params : ParameterDict, optional
            Parameters to bind into the block. ``None`` (the default,
            matching ``SymbolBlock``) leaves the block's parameters
            uninitialized. Defaulting this fixes the call site that
            constructs ``Net`` with only the symbol.
        """
        super(Net, self).__init__()
        inputs = [mx.sym.Variable(name) for name in self._INPUT_NAMES]
        self.sym_block = mx.gluon.nn.SymbolBlock(sym, inputs, params)

    def forward(self, *args):
        # Delegate execution straight to the wrapped SymbolBlock.
        return self.sym_block(*args)

def train_net(ctx, pretrained, epoch, prefix, begin_epoch, end_epoch, lr, lr_step):
    """Build the FPN-DCN training graph and (eventually) fit it.

    Parameters
    ----------
    ctx : list of mx.Context
        Devices to train on.
    pretrained : str
        Prefix of the pretrained checkpoint (consumed by the disabled
        loading code below).
    epoch : int
        Epoch number of the pretrained checkpoint.
    prefix : str
        Checkpoint prefix used when saving (disabled code below).
    begin_epoch, end_epoch : int
        Training epoch range (disabled code below).
    lr : float
        Base learning rate (disabled code below).
    lr_step : str
        Comma-separated epochs at which to decay the learning rate
        (disabled code below).

    NOTE(review): everything after network construction is currently
    commented out, so only the symbol/Net construction actually runs;
    the extra parameters are kept for when the pipeline is re-enabled.
    """
    # Fix seeds so anchor sampling / initialization are reproducible.
    mx.random.seed(3)
    np.random.seed(3)

    sym_instance = resnet_v1_101_fpn_dcn_rcnn()
    sym = sym_instance.get_symbol(config, is_train=True)

    # One RPN classification head per pyramid level; level = log2(stride).
    feat_pyramid_level = np.log2(config.network.RPN_FEAT_STRIDE).astype(int)
    feat_sym = [sym.get_internals()['rpn_cls_score_p' + str(x) + '_output']
                for x in feat_pyramid_level]
    # BUG FIX: Net's constructor takes a params argument that was not
    # being supplied here (TypeError). Pass None so the SymbolBlock is
    # created with its default (uninitialized) parameters.
    net = Net(feat_sym, None)

    #
    # # setup multi-gpu
    # batch_size = len(ctx)
    # input_batch_size = config.TRAIN.BATCH_IMAGES * batch_size
    #
    # # print config
    # pprint.pprint(config)
    # logging.info('training config:{}\n'.format(pprint.pformat(config)))
    #
    # roidb = pickle.load(open("/data1/zyx/yks/dataset/retail/annotations/train.roidb","rb"))
    # train_data = PyramidAnchorIterator(feat_sym, roidb, config, batch_size=input_batch_size, shuffle=config.TRAIN.SHUFFLE,
    #                                    ctx=ctx, feat_strides=config.network.RPN_FEAT_STRIDE, anchor_scales=config.network.ANCHOR_SCALES,
    #                                    anchor_ratios=config.network.ANCHOR_RATIOS, aspect_grouping=config.TRAIN.ASPECT_GROUPING,
    #                                    allowed_border=np.inf)
    #
    # # infer max shape
    # max_data_shape = [('data', (config.TRAIN.BATCH_IMAGES, 3, max([v[0] for v in config.SCALES]), max([v[1] for v in config.SCALES])))]
    # max_data_shape, max_label_shape = train_data.infer_shape(max_data_shape)
    # max_data_shape.append(('gt_boxes', (config.TRAIN.BATCH_IMAGES, 100, 5)))
    # print 'providing maximum shape', max_data_shape, max_label_shape
    #
    # data_shape_dict = dict(train_data.provide_data_single + train_data.provide_label_single)
    # pprint.pprint(data_shape_dict)
    # sym_instance.infer_shape(data_shape_dict)

    # # load and initialize params
    # if config.TRAIN.RESUME:
    #     print('continue training from ', begin_epoch)
    #     arg_params, aux_params = load_param(prefix, begin_epoch, convert=True)
    # else:
    #     arg_params, aux_params = load_param(pretrained, epoch, convert=True)
    #     sym_instance.init_weight(config, arg_params, aux_params)
    #
    # # check parameter shapes
    # sym_instance.check_parameter_shapes(arg_params, aux_params, data_shape_dict)
    #
    # # create solver
    # fixed_param_prefix = config.network.FIXED_PARAMS
    # data_names = [k[0] for k in train_data.provide_data_single]
    # label_names = [k[0] for k in train_data.provide_label_single]
    #
    # mod = mx.mod.Module(sym, data_names=data_names, label_names=label_names, context=ctx )
    #
    # if config.TRAIN.RESUME:
    #     mod._preload_opt_states = '%s-%04d.states'%(prefix, begin_epoch)
    #
    # # decide training params
    # # metric
    # rpn_eval_metric = metric.RPNAccMetric()
    # rpn_cls_metric = metric.RPNLogLossMetric()
    # rpn_bbox_metric = metric.RPNL1LossMetric()
    # rpn_fg_metric = metric.RPNFGFraction(config)
    # eval_metric = metric.RCNNAccMetric(config)
    # eval_fg_metric = metric.RCNNFGAccuracy(config)
    # cls_metric = metric.RCNNLogLossMetric(config)
    # bbox_metric = metric.RCNNL1LossMetric(config)
    # eval_metrics = mx.metric.CompositeEvalMetric()
    # # rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, eval_metric, cls_metric, bbox_metric
    # for child_metric in [rpn_eval_metric, rpn_cls_metric, rpn_bbox_metric, rpn_fg_metric, eval_fg_metric, eval_metric, cls_metric, bbox_metric]:
    #     eval_metrics.add(child_metric)
    # # callback
    # batch_end_callback = callback.Speedometer(train_data.batch_size, frequent=args.frequent)
    # means = np.tile(np.array(config.TRAIN.BBOX_MEANS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
    # stds = np.tile(np.array(config.TRAIN.BBOX_STDS), 2 if config.CLASS_AGNOSTIC else config.dataset.NUM_CLASSES)
    # epoch_end_callback = [mx.callback.module_checkpoint(mod, prefix, period=1, save_optimizer_states=True), callback.do_checkpoint(prefix, means, stds)]
    # # decide learning rate
    # base_lr = lr
    # lr_factor = config.TRAIN.lr_factor
    # lr_epoch = [float(epoch) for epoch in lr_step.split(',')]
    # lr_epoch_diff = [epoch - begin_epoch for epoch in lr_epoch if epoch > begin_epoch]
    # lr = base_lr * (lr_factor ** (len(lr_epoch) - len(lr_epoch_diff)))
    # lr_iters = [int(epoch * len(roidb) / batch_size) for epoch in lr_epoch_diff]
    # print('lr', lr, 'lr_epoch_diff', lr_epoch_diff, 'lr_iters', lr_iters)
    # lr_scheduler = WarmupMultiFactorScheduler(lr_iters, lr_factor, config.TRAIN.warmup, config.TRAIN.warmup_lr, config.TRAIN.warmup_step)
    # # optimizer
    # optimizer_params = {'momentum': config.TRAIN.momentum,
    #                     'wd': config.TRAIN.wd,
    #                     'learning_rate': lr,
    #                     'lr_scheduler': lr_scheduler,
    #                     'clip_gradient': None}
    # #
    # if not isinstance(train_data, PrefetchingIter):
    #     train_data = PrefetchingIter(train_data)
    #
    # # train
    #
    # mod.fit(train_data, eval_metric=eval_metrics, epoch_end_callback=epoch_end_callback,
    #         batch_end_callback=batch_end_callback, kvstore=config.default.kvstore,
    #         optimizer='sgd', optimizer_params=optimizer_params,
    #         arg_params=arg_params, aux_params=aux_params, begin_epoch=begin_epoch, num_epoch=end_epoch)


def main():
    """Entry point: load the experiment config, pick GPUs, start training."""
    cfg_path = "models/defomableconvnets/cfgs/resnet_v1_101_coco_trainval_fpn_dcn_end2end_ohem.yaml"
    update_config(cfg_path)
    # config.gpus is a comma-separated device-id string, e.g. "0,1".
    gpu_ids = config.gpus.split(',')
    ctx = [mx.gpu(int(gpu_id)) for gpu_id in gpu_ids]
    train_net(
        ctx,
        config.network.pretrained,
        config.network.pretrained_epoch,
        config.TRAIN.model_prefix,
        config.TRAIN.begin_epoch,
        config.TRAIN.end_epoch,
        config.TRAIN.lr,
        config.TRAIN.lr_step,
    )

# Script entry-point guard: run training only when executed directly.
if __name__ == '__main__':
    main()
