import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import os
import os.path as osp
import argparse
import time
import pprint
import sys
import numpy as np

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from lib.utils.logger import get_logger
from lib.config import cfg, update_config
# from lib.data.dataset import WiderFaceDetection, detection_collate
from lib.data.cartoonface import CartoonFaceDetection, detection_collate
from lib.nn.loss import HeatmapFocalLoss, MaskedL1Loss, GIouLoss
# from lib.nn.mobilenetv2 import get_mobile_net
from lib.utils.metric import AverageMeter, WiderFaceMetric
from lib.nn.cenetrnet import CenterNet
from lib.utils.scheduler import WarmupCosineLR


import torch.utils.data.distributed
import torch.distributed as dist
from apex import amp

def get_args():
    """Parse command-line arguments.

    Returns:
        argparse.Namespace with a single attribute ``cfg`` — path to the
        experiment config file (empty string when not supplied).
    """
    p = argparse.ArgumentParser(description='CenterFace Detection')
    p.add_argument('--cfg', type=str, default='', help='Config file path')
    return p.parse_args()


def get_data(cfg):
    """Build the training DataLoader for the cartoon-face dataset.

    The effective batch size is BATCH_SIZE_PER_GPU scaled by the number of
    configured GPUs. Validation is currently disabled for this task, so the
    second element of the returned pair is always None.

    Returns:
        (train_dataloader, None)
    """
    batch_size = cfg.TRAIN.BATCH_SIZE_PER_GPU * len(cfg.GPUS)
    workers = cfg.WORKERS

    train_set = CartoonFaceDetection(cfg, split='train')
    train_loader = DataLoader(train_set, batch_size=batch_size,
                              shuffle=True, num_workers=workers)

    # WiderFace-style validation is switched off for the cartoon-face task.
    return train_loader, None

def get_model(cfg):
    """Construct the CenterNet detector and expose its loss module.

    Returns:
        (model, loss_calc) — the network and its attached loss callable.
    """
    net = CenterNet(cfg)
    return net, net.loss_calc

def validate(cfg, model, val_dataloader):
    """Run inference over the validation set and return WiderFace-style AP.

    NOTE(review): this function appears to be dead code — get_data() currently
    returns val_dataloader=None and the call site in train() is commented out.
    As written it would raise NameError: `CenterFace` is not imported anywhere
    in this file, and `device` is read from module scope (set in __main__).
    Confirm the intended inference wrapper before re-enabling.
    """
    eval_metric = WiderFaceMetric(iou_thresh=0.4)
    # NOTE(review): `CenterFace` is undefined in this module — presumably a
    # decode/post-process wrapper around the raw model; verify the import.
    net = CenterFace(model, flip_test=False)
    net = net.to(device)
    net.eval()
    with torch.no_grad():
        for idx, batch in enumerate(val_dataloader):
            inputs = torch.FloatTensor(batch['img']).to(device) 
            gt_bbox = batch['gt_bbox']
            gt_list = batch['gt_list']
            # outputs = model(inputs)[0]
            # heatmap_pred = outputs['hm']
            # center_reg_pred = outputs['hm_offset']
            # wh_pred = outputs['wh']
            # landm_reg_pred = outputs['landmarks']
            # bboxes, scores, lmreg, clses = net(heatmap_pred, center_reg_pred, wh_pred, landm_reg_pred)
            bboxes, scores, lmreg, clses = net(inputs)
            bboxes = bboxes.cpu().numpy()
            # Flatten per-image score tensors before handing them to the metric.
            scores = scores.cpu().reshape(scores.shape[0], -1).numpy()
            eval_metric.update(bboxes, scores, gt_bbox, gt_list)  
    return eval_metric.get()

def train(cfg, model, optimizer, train_dataloader):
    """Main training loop: step-decayed LR, apex-amp backward, per-epoch checkpoints.

    Relies on module-level globals set in ``__main__``: ``loss_calc``,
    ``start_epoch``, ``logger`` and ``device``. Saves a checkpoint (model,
    optimizer and amp state) to cfg.OUTPUT_DIR after every epoch.
    """
    hm_meter = AverageMeter()
    wh_meter = AverageMeter()

    for epoch in range(start_epoch + 1, int(cfg.TRAIN.MAX_EPOCH) + 1):
        model.train()
        hm_meter.reset()
        wh_meter.reset()

        # Step-decay schedule: multiply the base LR by LR_FACTOR once for
        # every milestone in LR_STEP that has already been reached.
        decay_steps = sum(1 for milestone in cfg.TRAIN.LR_STEP if milestone <= epoch)
        current_lr = cfg.TRAIN.LR * (cfg.TRAIN.LR_FACTOR ** decay_steps)
        for group in optimizer.param_groups:
            group['lr'] = current_lr

        for batch_idx, batch in enumerate(train_dataloader):
            inputs = torch.FloatTensor(batch['img']).to(device)
            heatmap = torch.FloatTensor(batch['heatmap']).to(device)
            wh_target = torch.FloatTensor(batch['wh_target']).to(device)
            wh_weight = torch.FloatTensor(batch['wh_weight']).to(device)

            optimizer.zero_grad()
            outputs = model(inputs)
            hm_loss, wh_loss = loss_calc(outputs['cls'], outputs['wh'],
                                         heatmap, wh_target, wh_weight)
            loss = hm_loss + wh_loss

            # apex mixed-precision backward pass (amp.initialize is done in
            # __main__ before this function is called).
            with amp.scale_loss(loss, optimizer) as scaled_loss:
                scaled_loss.backward()
            optimizer.step()

            n = inputs.shape[0]
            hm_meter.update(hm_loss.item(), n)
            wh_meter.update(wh_loss.item(), n)

            # Log running averages every 20 batches.
            if (batch_idx + 1) % 20 == 0:
                logger.info('[Epoch {}][Batch {}/{}], LR={:.2e}, {}={:.3f}, {}={:.3f}'.format(
                    epoch, batch_idx + 1, len(train_dataloader),
                    optimizer.param_groups[0]['lr'],
                    'Heatmap', hm_meter.avg,
                    'WH', wh_meter.avg))

        # Persist everything needed to resume: weights, epoch counter,
        # optimizer state and amp loss-scale state.
        checkpoint = {'model': model.state_dict(),
                      'epoch': epoch,
                      'optimizer': optimizer.state_dict(),
                      'amp': amp.state_dict()}
        torch.save(checkpoint, osp.join(cfg.OUTPUT_DIR, 'model-%03d.pth' % (epoch)))


if __name__ == "__main__":
    opt = get_args()
    cfg.merge_from_file(opt.cfg)
    # Restrict visible devices to the configured GPU ids.
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(i) for i in cfg.GPUS)

    logger = get_logger(cfg)
    logger.info(pprint.pformat(opt))
    logger.info(pprint.pformat(cfg))

    train_dataloader, val_dataloader = get_data(cfg)
    device = 'cuda' if len(cfg.GPUS) else 'cpu'
    model, loss_calc = get_model(cfg)
    start_epoch = 0

    if cfg.SYNCE_BN:
        import apex
        print("using apex synced BN")
        model = apex.parallel.convert_syncbn_model(model)
    optimizer = torch.optim.Adam(model.parameters(),
            lr=cfg.TRAIN.LR, weight_decay=cfg.TRAIN.WD)

    model = model.to(device)
    if len(cfg.GPUS):
        # amp must be initialized before DataParallel wrapping and before
        # restoring any checkpoint state.
        model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
        model = torch.nn.DataParallel(model)
        cudnn.benchmark = True

    if cfg.PRETRAIN:
        state = torch.load(cfg.PRETRAIN)
        # BUG FIX: when the model is wrapped in DataParallel, its state_dict
        # keys carry a 'module.' prefix, so stripping the prefix from the
        # checkpoint and loading into the wrapper would fail with
        # missing/unexpected keys. Load into the underlying module instead.
        # Also strip only a leading 'module.' (count=1) so interior key text
        # is never mangled.
        target = model.module if isinstance(model, torch.nn.DataParallel) else model
        target.load_state_dict(
            {k.replace('module.', '', 1): v for k, v in state['model'].items()})
        start_epoch = state['epoch']
        optimizer.load_state_dict(state['optimizer'])
        amp.load_state_dict(state['amp'])

    train(cfg, model, optimizer, train_dataloader)

