import torch
import torch.backends.cudnn as cudnn
from torch.utils.data import DataLoader
import os
import os.path as osp
import argparse
import time
import pprint
import sys
import numpy as np
import torch.distributed as dist
torch.distributed.init_process_group(backend="nccl")

sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..'))
from configs import load_cfg
from lib.utils.logger import get_logger
from lib.data.widerface import WiderFaceDetection
# from lib.nn.mobilenetv2 import get_mobile_net
from lib.utils.metric import AverageMeter, WiderFaceMetric
from lib.nn.centernet_ttf import CenterNet
from lib.utils.scheduler import WarmupCosineLR

def get_args():
    """Collect command-line options for distributed CenterFace training.

    Returns:
        argparse.Namespace with ``cfg`` (path to the config file) and
        ``local_rank`` (filled in by ``torch.distributed.launch``).
    """
    arg_parser = argparse.ArgumentParser(description='CenterFace Detection')
    arg_parser.add_argument('--cfg', type=str, default='',
                            help='Config file path')
    arg_parser.add_argument('--local_rank', type=int, default=-1)
    return arg_parser.parse_args()


def get_data(cfg):
    """Build the sharded training dataloader (validation is currently disabled).

    Args:
        cfg: config dict providing ``train.batch_size_per_gpu`` and ``workers``.

    Returns:
        (train_sampler, train_dataloader, None) — the third slot is kept so
        callers can keep unpacking three values.
    """
    batch_size = cfg['train']['batch_size_per_gpu']
    num_workers = cfg['workers']
    train_set = WiderFaceDetection(cfg, split='train')
    # DistributedSampler shards the dataset across ranks and handles epoch
    # shuffling itself, so the loader must be created with shuffle=False.
    train_sampler = torch.utils.data.distributed.DistributedSampler(train_set)
    train_dataloader = DataLoader(
        train_set,
        batch_size=batch_size,
        shuffle=False,
        num_workers=num_workers,
        collate_fn=train_set.detection_collate,
        pin_memory=True,
        sampler=train_sampler,
    )
    return train_sampler, train_dataloader, None

def get_model(cfg):
    """Instantiate the CenterNet detector and expose its loss callable.

    Returns:
        (model, loss_fn) where ``loss_fn`` is the model's own ``loss`` member.
    """
    net = CenterNet(cfg)
    return net, net.loss

def reduce_tensor(tensor: torch.Tensor) -> torch.Tensor:
    """Average *tensor* across every rank in the process group.

    Works on a clone so the caller's tensor is left untouched.
    """
    world_size = dist.get_world_size()
    averaged = tensor.clone()
    dist.all_reduce(averaged, op=dist.ReduceOp.SUM)
    averaged /= world_size
    return averaged

def train(cfg, model, train_dataloader):
    """Run the distributed training loop with step-decayed Adam.

    Relies on module-level globals assigned in ``__main__``:
    ``train_sampler``, ``device``, ``loss_calc``, ``logger`` and
    ``start_epoch``.

    Args:
        cfg: config dict providing ``train.lr``, ``train.wd``,
            ``train.lr_step``, ``train.max_epoch`` and ``output_dir``.
        model: the (DDP-wrapped) CenterNet detector.
        train_dataloader: loader produced by ``get_data``.
    """
    optimizer = torch.optim.Adam(model.parameters(), lr=cfg['train']['lr'],
                                 weight_decay=cfg['train']['wd'])
    hm_metric = AverageMeter()
    ltrb_metric = AverageMeter()
    landm_metric = AverageMeter()

    for epoch in range(1 + start_epoch, cfg['train']['max_epoch'] + 1):
        # Re-seed the sampler so each epoch draws a different shard order.
        train_sampler.set_epoch(epoch)
        model.train()
        hm_metric.reset()
        ltrb_metric.reset()
        landm_metric.reset()

        # Step-decay LR: multiply by 0.1 for every milestone already passed.
        count = sum(1 for s in cfg['train']['lr_step'] if s <= epoch)
        lr = cfg['train']['lr'] * (0.1 ** count)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr

        for idx, batch in enumerate(train_dataloader):
            inputs = batch[0].to(device)
            targets = [anno.to(device) for anno in batch[1]]

            optimizer.zero_grad()
            outputs = model(inputs)
            hm_loss, ltrb_loss, landm_loss = loss_calc(
                outputs['cls'], outputs['ltrb'], outputs['landm'], targets)

            loss = hm_loss + ltrb_loss + landm_loss
            loss.backward()
            optimizer.step()

            # Average the losses across ranks so logged numbers are global.
            # (.detach() replaces the deprecated .data access.)
            reduced_hm_loss = reduce_tensor(hm_loss.detach())
            reduced_ltrb_loss = reduce_tensor(ltrb_loss.detach())
            reduced_landm_loss = reduce_tensor(landm_loss.detach())
            bs = inputs.shape[0]
            hm_metric.update(reduced_hm_loss.item(), bs)
            ltrb_metric.update(reduced_ltrb_loss.item(), bs)
            landm_metric.update(reduced_landm_loss.item(), bs)

            if dist.get_rank() == 0 and not (idx + 1) % 100:
                logger.info('[Epoch {}][Batch {}/{}], LR={:.2e}, {}={:.4f}, {}={:.4f}, {}={:.4f}'.format(
                    epoch, idx + 1, len(train_dataloader), optimizer.param_groups[0]['lr'],
                    'hm', hm_metric.avg,
                    'ltrb', ltrb_metric.avg,
                    'landm', landm_metric.avg))

        # BUG FIX: checkpoints were previously saved only when
        # dist.get_rank() == 3, so nothing was ever written when running with
        # fewer than 4 processes.  Save on rank 0, consistent with the
        # rank-0 logging above.
        if dist.get_rank() == 0:
            state = {'model': model.state_dict(), 'epoch': epoch}
            torch.save(state, osp.join(cfg['output_dir'], 'model-%03d.pth' % epoch))


# python -m torch.distributed.launch --nproc_per_node=4  scripts/train_dist.py --cfg configs/cfg_mobilenetv2.py
if __name__ == "__main__":
    opt = get_args()
    cfg = load_cfg(opt.cfg)
    # NOTE(review): the NCCL process group is initialised at import time (top
    # of this file), *before* this mask is set — confirm the launcher already
    # restricts visible devices, otherwise this assignment has no effect.
    os.environ["CUDA_VISIBLE_DEVICES"] = ','.join(str(i) for i in cfg['gpus'])

    logger = get_logger(cfg)
    logger.info(pprint.pformat(opt))
    logger.info(pprint.pformat(cfg))

    train_sampler, train_dataloader, val_dataloader = get_data(cfg)
    model, loss_calc = get_model(cfg)
    start_epoch = 0
    if cfg['pretrain']:
        # map_location='cpu' stops every rank from materialising the
        # checkpoint on the GPU it was originally saved from (usually cuda:0);
        # the weights are moved to the right device below.
        state = torch.load(cfg['pretrain'], map_location='cpu')
        # Strip a possible 'module.' prefix left by a DDP/DataParallel save.
        model.load_state_dict(
            {k.replace('module.', ''): v for k, v in state['model'].items()})
        start_epoch = state['epoch']

    # Bind this process to its own GPU before wrapping with DDP.
    # (The earlier `device = 'cuda' if ...` assignment was dead code — it was
    # always overwritten here before use, so it has been removed.)
    torch.cuda.set_device(opt.local_rank)
    device = torch.device("cuda", opt.local_rank)
    model = model.to(device)
    if len(cfg['gpus']):
        model = torch.nn.parallel.DistributedDataParallel(
            model, device_ids=[opt.local_rank])
        cudnn.benchmark = True

    train(cfg, model, train_dataloader)

