import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import os
import os.path as osp
import argparse
import time
import pprint
import sys
import numpy as np


sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from configs import load_cfg
from lib.utils.logger import get_logger
from lib.data.widerface import WiderFaceDetection
# from lib.nn.mobilenetv2 import get_mobile_net
from lib.utils.metric import AverageMeter, WiderFaceMetric
from lib.nn.centernet_ttf import CenterNet
from lib.utils.scheduler import WarmupCosineLR

def get_args():
    """Parse command-line options; `--cfg` points at the config file to load."""
    ap = argparse.ArgumentParser(description='CenterFace Detection')
    ap.add_argument('--cfg', type=str, default='', help='Config file path')
    return ap.parse_args()


def get_data(cfg):
    """Build the training dataloader.

    Effective batch size is batch_size_per_gpu scaled by the number of GPUs.
    Returns (train_dataloader, val_dataloader); validation is currently
    disabled, so the second element is always None.
    """
    batch_size = cfg['train']['batch_size_per_gpu'] * len(cfg['gpus'])
    dataset = WiderFaceDetection(cfg, split='train')
    loader = DataLoader(
        dataset,
        batch_size=batch_size,
        shuffle=True,
        num_workers=cfg['workers'],
        collate_fn=dataset.detection_collate,
    )
    return loader, None

def get_model(cfg):
    """Instantiate the CenterNet detector and return it with its loss callable."""
    net = CenterNet(cfg)
    return net, net.loss




def train(cfg, model, train_dataloader):
    """Run the full training loop and checkpoint the model after every epoch.

    NOTE(review): relies on module-level globals bound in ``__main__``:
    ``device``, ``loss_calc``, ``logger`` and ``start_epoch``.
    """
    train_cfg = cfg['train']
    optimizer = torch.optim.Adam(model.parameters(), lr=train_cfg['lr'],
                                 weight_decay=train_cfg['wd'])

    # Running loss averages for the three heads: heatmap, box (ltrb), landmarks.
    meters = {name: AverageMeter() for name in ('hm', 'ltrb', 'landm')}
    batches_per_epoch = len(train_dataloader)

    for epoch in range(start_epoch + 1, train_cfg['max_epoch'] + 1):
        model.train()
        for meter in meters.values():
            meter.reset()

        # Step decay: scale the base lr by 0.1 for each milestone already passed.
        passed = sum(s <= epoch for s in train_cfg['lr_step'])
        lr = train_cfg['lr'] * (0.1 ** passed)
        for group in optimizer.param_groups:
            group['lr'] = lr

        for step, batch in enumerate(train_dataloader, 1):
            images = batch[0].to(device)
            annos = [anno.to(device) for anno in batch[1]]

            optimizer.zero_grad()
            preds = model(images)
            hm_loss, ltrb_loss, landm_loss = loss_calc(
                preds['cls'], preds['ltrb'], preds['landm'], annos)
            total_loss = hm_loss + ltrb_loss + landm_loss
            total_loss.backward()
            optimizer.step()

            n = images.shape[0]
            meters['hm'].update(hm_loss.item(), n)
            meters['ltrb'].update(ltrb_loss.item(), n)
            meters['landm'].update(landm_loss.item(), n)

            # Log running averages every 10 batches.
            if step % 10 == 0:
                logger.info('[Epoch {}][Batch {}/{}], LR={:.2e}, {}={:.4f}, {}={:.4f}, {}={:.4f}'.format(
                    epoch, step, batches_per_epoch, optimizer.param_groups[0]['lr'],
                    'hm', meters['hm'].avg,
                    'ltrb', meters['ltrb'].avg,
                    'landm', meters['landm'].avg))

        # Checkpoint at the end of every epoch.
        torch.save({'model': model.state_dict(), 'epoch': epoch},
                   osp.join(cfg['output_dir'], 'model-%03d.pth' % epoch))


if __name__ == "__main__":
    opt = get_args()
    cfg = load_cfg(opt.cfg)
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join([str(i) for i in cfg['gpus']])

    logger = get_logger(cfg)
    logger.info(pprint.pformat(opt))
    logger.info(pprint.pformat(cfg))

    train_dataloader, val_dataloader = get_data(cfg)
    device = 'cuda' if len(cfg['gpus']) else 'cpu'
    model, loss_calc = get_model(cfg)
    start_epoch = 0
    if cfg['pretrain']:
        state = torch.load(cfg['pretrain'])
        model.load_state_dict({k.replace('module.',''):v for k,v in state['model'].items()})
        start_epoch = state['epoch']

        # new_state_dict = {k.replace('module.',''):v for k,v in state['model'].items()}
        # model_dict = model.state_dict()
        # pretrained_dict = {k: v for k, v in new_state_dict.items() if k in model_dict}#filter out unnecessary keys 
        # model_dict.update(pretrained_dict)
        # model.load_state_dict(model_dict)

    
    model = model.to(device)
    if len(cfg['gpus']):
        model = torch.nn.DataParallel(model)
        cudnn.benchmark = True

    train(cfg, model, train_dataloader)

