import sys
sys.path.append('./')

import os
import argparse
import shutil
import time
import yaml
import json
from easydict import EasyDict as edict
import logging

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.multiprocessing as mp
from torch.nn.parallel import DataParallel as DP
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.tensorboard import SummaryWriter

from utils import *

# cuDNN autotuning: benchmarks conv algorithms on first use; a win when input
# sizes stay fixed across iterations (typical for image classification).
cudnn.benchmark = True
# Training requires CUDA. NOTE(review): `assert` is stripped under `python -O`.
assert torch.cuda.is_available()

parser = argparse.ArgumentParser(description='PyTorch Image Classification Training')
# Positional but optional (nargs='?'): path to the YAML experiment config.
parser.add_argument('config', default='configs/res50_imagenet.yaml', type=str, nargs='?', help='config file path')
parser.add_argument('--resume', type=str, help='ckpt file path')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--gpus', default="0,1,3", type=str, help='GPUs id to use. separated by ,')
parser.add_argument('--print-freq', default=100, type=int, help='print frequency')
parser.add_argument('--name', default='ddp_exp', type=str, help='experiment name')

# Process-wide mutable state shared by main/train/validate, used instead of
# threading these values through every function call. Each spawned DDP process
# gets its own copy (separate interpreter).
gvar = edict({
    'args': None,      # experiment config loaded from the YAML file
    'cmd_args': None,  # parsed command-line arguments

    # for ddp
    'rank': None,        # process rank; -1 in single-process (non-DDP) mode
    'world_size': None,  # number of DDP processes
    'gpu': None,         # GPU id this process runs on
    
    'saver': None,      # experiment-directory helper (from utils)
    'best_acc1': None,  # best top-1 validation accuracy seen so far
    'epoch': None,    # current epoch
    'cur_iter': None,   # global training-iteration counter (for TensorBoard)
    'tb_writer': None   # TensorBoard SummaryWriter; only set on rank <= 0
})

def main(rank=-1, world_size=-1, cmd_args=None):
    """Run training (or evaluation with --evaluate) in one process.

    Args:
        rank: DDP process rank; -1 selects single-process mode (DP or one GPU).
        world_size: number of DDP processes; -1 in single-process mode.
        cmd_args: parsed command-line args (edict with `config`, `gpus`,
            `name`, `resume`, `evaluate`, `print_freq`).
    """
    gvar.rank = rank
    gvar.world_size = world_size
    # Bug fix: in single-process mode (rank == -1) the model lives on the
    # first listed GPU, so inputs must go there too. The old `gpus[rank]`
    # indexing silently selected the *last* GPU via negative indexing.
    gvar.gpu = cmd_args.gpus[rank] if rank >= 0 else cmd_args.gpus[0]

    # Load the YAML experiment config; use a context manager so the file
    # handle is closed promptly (the old code leaked it).
    with open(cmd_args.config) as f:
        args = edict(yaml.load(f, yaml.Loader))

    if rank >= 0:
        print(f'rank: {rank}')
        dist.init_process_group('nccl', rank=rank, world_size=world_size,
                                init_method="tcp://127.0.0.1:11235")

    if rank <= 0:
        # Main process creates the experiment dir, log file and TB writer.
        gvar.saver = Saver(cmd_args.name)
        set_logger(gvar.saver.save_dir / 'log.txt', rank)
        logging.info("cmd_args: " + json.dumps(cmd_args, indent=4, sort_keys=True))
        logging.info("args: " + json.dumps(args, indent=4, sort_keys=True))

        tb_log_dir = gvar.saver.save_dir / 'tb_logs'
        tb_log_dir.mkdir()
        gvar.tb_writer = SummaryWriter(str(tb_log_dir))
    else:
        # Worker ranks attach to the existing experiment directory.
        gvar.saver = Saver(cmd_args.name, exist_ok=True)
        set_logger(gvar.saver.save_dir / 'log.txt', rank)  # subprocess still needs its logger

    gvar.args = args
    gvar.cmd_args = cmd_args
    if args.seed is not None:
        set_seeds(args.seed)

    logging.info("Use GPU: {} for training".format(cmd_args.gpus))

    # ---- data loading ----
    train_dataset = build_dataset(args.dataset, 'train')
    val_dataset = build_dataset(args.dataset, 'val')
    train_sampler = (torch.utils.data.distributed.DistributedSampler(train_dataset)
                     if rank >= 0 else None)
    train_batch_size = args.train.batch_size
    if rank >= 0:
        # the configured batch size is the global one; split it across ranks
        train_batch_size //= gvar.world_size
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=train_batch_size,
        shuffle=(train_sampler is None),
        num_workers=args.train.workers,
        pin_memory=True,
        sampler=train_sampler,
    )

    # NOTE(review): in DDP mode every rank runs the full validation set and
    # results are not reduced across ranks; only rank 0's numbers are logged.
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.train.batch_size,
        shuffle=False,
        num_workers=args.train.workers,
        pin_memory=True
    )

    # ---- model ----
    logging.info('building model...')
    model = build_model(args.model)

    if rank >= 0:
        logging.info(f'ddp gpu: {gvar.gpu}')
        torch.cuda.set_device(gvar.gpu)
        model = nn.SyncBatchNorm.convert_sync_batchnorm(model)
        model = DDP(model.cuda(gvar.gpu), device_ids=[gvar.gpu])
    elif len(cmd_args.gpus) > 1:
        # NOTE(review): unreachable with the current __main__ launcher, which
        # always spawns DDP processes when more than one GPU is listed.
        torch.cuda.set_device(cmd_args.gpus[0])
        model = DP(model.cuda(), device_ids=cmd_args.gpus)
    else:
        torch.cuda.set_device(cmd_args.gpus[0])
        model = model.cuda()
    logging.info('model has been built')

    # loss function (criterion) and optimizer
    criterion = nn.CrossEntropyLoss().cuda()

    optimizer = build_optimizer(args.train.optimizer, model.parameters())
    lr_scheduler = build_lr_scheduler(args.train.lr_scheduler, optimizer)

    # optionally resume from a checkpoint
    if cmd_args.resume is not None:
        logging.info(f'resuming from ckpt: {cmd_args.resume}')
        gvar.best_acc1, gvar.epoch, gvar.cur_iter = \
            resume_from_ckpt(model, optimizer, cmd_args.resume,
                             device=torch.device(f'cuda:{cmd_args.gpus[0]}'))
        logging.info(f'best_acc1: {gvar.best_acc1}, epoch: {gvar.epoch}, cur_iter: {gvar.cur_iter}')
        # TODO(review): lr_scheduler state is not restored here; unless
        # build_lr_scheduler accounts for the resumed epoch (e.g. via
        # last_epoch), the LR schedule restarts from epoch 0 after a resume.
    else:
        gvar.best_acc1 = 0
        gvar.cur_iter = 0
        gvar.epoch = 0

    if cmd_args.evaluate:
        # Bug fix: validate() takes three arguments; the old call passed a
        # spurious fourth one and raised TypeError in --evaluate mode.
        validate(val_loader, model, criterion)
        return

    for epoch in range(gvar.epoch, args.train.epoch):
        gvar.epoch = epoch

        if rank >= 0:
            # reshuffle the per-rank shards for this epoch
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion)

        # remember best acc@1 and save checkpoint (rank 0 only)
        is_best = acc1 > gvar.best_acc1
        gvar.best_acc1 = max(acc1, gvar.best_acc1)

        if isinstance(model, (DDP, DP)):
            state_dict = model.module.state_dict()  # unwrap for portable ckpts
        else:
            state_dict = model.state_dict()
        if rank <= 0:
            save_checkpoint({
                'epoch': epoch + 1,
                'cur_iter': gvar.cur_iter,
                'args': args,
                'state_dict': state_dict,
                'best_acc1': gvar.best_acc1,
                'optimizer': optimizer.state_dict(),
            }, is_best)
            tb_write_scalars({
                'lr': lr_scheduler.get_last_lr()[0]
            }, True)

        lr_scheduler.step()


def train(train_loader, model, criterion, optimizer):
    """Train `model` for one epoch over `train_loader`.

    Advances the global iteration counter (gvar.cur_iter) every step and, on
    the main process (rank <= 0), streams per-iteration metrics to
    TensorBoard and prints progress every `print_freq` steps.
    """
    time_meter = AverageMeter('Time', ':6.3f')
    load_meter = AverageMeter('Data', ':6.3f')
    loss_meter = AverageMeter('Loss', ':.4e')
    acc1_meter = AverageMeter('Acc@1', ':6.2f')
    acc5_meter = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [time_meter, load_meter, loss_meter, acc1_meter, acc5_meter],
        prefix="Epoch: [{}]".format(gvar.epoch))

    model.train()  # enable dropout / batch-norm statistics updates

    tic = time.time()
    for step, (images, target) in enumerate(train_loader):
        # time spent waiting on the data loader
        data_elapsed = time.time() - tic
        load_meter.update(data_elapsed)

        images = images.cuda(gvar.gpu, non_blocking=True)
        target = target.cuda(gvar.gpu, non_blocking=True)

        # forward pass
        output = model(images)
        loss = criterion(output, target)

        # bookkeeping: accuracy and loss, weighted by batch size
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        n = images.size(0)
        loss_meter.update(loss.item(), n)
        acc1_meter.update(acc1[0], n)
        acc5_meter.update(acc5[0], n)

        # backward pass and parameter update (with optional gradient clipping)
        optimizer.zero_grad()
        loss.backward()
        max_norm = gvar.args.train.grad_norm if 'grad_norm' in gvar.args.train else 0
        if max_norm > 0:
            torch.nn.utils.clip_grad_norm_(
                model.parameters(),
                max_norm=max_norm,
                norm_type=2.0)
        optimizer.step()

        # wall-clock time for the whole step
        batch_elapsed = time.time() - tic
        time_meter.update(batch_elapsed)
        tic = time.time()

        if gvar.rank <= 0:
            tb_write_scalars({
                'Times/data_time': data_elapsed,
                'Times/batch_time': batch_elapsed,
                'Accuracy/top1_train': acc1.item(),
                'Accuracy/top5_train': acc5.item(),
                'Loss/loss_train': loss.item(),
            })
            if step % gvar.cmd_args.print_freq == 0 and step > 0:
                progress.display(step)
        # every rank advances the counter, mirroring its own TB step axis
        gvar.cur_iter += 1


def validate(val_loader, model, criterion):
    """Evaluate `model` on `val_loader` and return the average top-1 accuracy.

    Runs under torch.no_grad(); on the main process (rank <= 0) the epoch's
    summary is logged and written to TensorBoard.
    """
    time_meter = AverageMeter('Time', ':6.3f')
    loss_meter = AverageMeter('Loss', ':.4e')
    acc1_meter = AverageMeter('Acc@1', ':6.2f')
    acc5_meter = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [time_meter, loss_meter, acc1_meter, acc5_meter],
        prefix='Test: ')

    model.eval()  # freeze dropout / batch-norm statistics

    with torch.no_grad():
        tic = time.time()
        for step, (images, target) in enumerate(val_loader):
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

            # forward pass only
            output = model(images)
            loss = criterion(output, target)

            # accumulate accuracy and loss, weighted by batch size
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            n = images.size(0)
            loss_meter.update(loss.item(), n)
            acc1_meter.update(acc1[0], n)
            acc5_meter.update(acc5[0], n)

            time_meter.update(time.time() - tic)
            tic = time.time()

            # NOTE: unlike train(), this prints on every rank
            if step % gvar.cmd_args.print_freq == 0 and step > 0:
                progress.display(step)
    if gvar.rank <= 0:
        logging.info(f'Test Epoch {gvar.epoch} Acc@1 {acc1_meter.avg:.3f} Acc@5 {acc5_meter.avg:.3f} Loss {loss_meter.avg:.6f}')
        tb_write_scalars({
            'Accuracy/top1_eval': acc1_meter.avg,
            'Accuracy/top5_eval': acc5_meter.avg,
            'Loss/loss_eval': loss_meter.avg,
        }, True)

    return acc1_meter.avg


def save_checkpoint(state, is_best):
    """Write `state` to <save_dir>/last.pt; mirror it to best.pt when `is_best`."""
    last_path = gvar.saver.save_dir / "last.pt"
    torch.save(state, last_path)
    if is_best:
        best_path = gvar.saver.save_dir / 'best.pt'
        shutil.copyfile(last_path, str(best_path))


def tb_write_scalars(data, use_epoch=False):
    """Write each tag/value pair in `data` to TensorBoard.

    The x-axis is the current epoch when `use_epoch` is True, otherwise the
    global iteration counter.
    """
    if use_epoch:
        step = gvar.epoch
    else:
        step = gvar.cur_iter
    for tag, value in data.items():
        gvar.tb_writer.add_scalar(tag, value, step)


def accuracy(output, target, topk=(1,)):
    """Return top-k accuracies (percent, as 1-element tensors) for each k in `topk`.

    `output` is a (batch, classes) score tensor; `target` holds the class
    index per sample.
    """
    with torch.no_grad():
        k_max = max(topk)
        n_samples = target.size(0)

        # (batch, k_max) indices of the highest-scoring classes, transposed
        # to (k_max, batch) so row j holds everyone's j-th best guess
        _, top_idx = output.topk(k_max, 1, True, True)
        top_idx = top_idx.t()
        hits = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        results = []
        for k in topk:
            # a sample counts as correct if the target is in its first k guesses
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(n_correct.mul_(100.0 / n_samples))
        return results


if __name__ == '__main__':
    # parse CLI args into an attribute-style dict; "0,1,3" -> [0, 1, 3]
    cli_args = edict(parser.parse_args().__dict__)
    cli_args.gpus = [int(g) for g in cli_args.gpus.split(',')]

    n_procs = len(cli_args.gpus)
    if n_procs > 1:
        # one DDP worker per GPU; spawn prepends the rank to `args`
        mp.spawn(main,
            args=(n_procs, cli_args),
            nprocs=n_procs,
            join=True)
    else:
        # single-process mode: rank/world_size of -1 disables DDP in main()
        main(-1, -1, cli_args)