import sys
sys.path.append('./')

import argparse
import shutil
import time
import yaml
import json
import re
import math
import numpy as np
from easydict import EasyDict as edict
import logging

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter

from pq.prune import replace_prune_module
from pq.modules.mconv import mapping
from utils import *

# Let cuDNN autotune conv algorithms for the (fixed-shape) inputs.
cudnn.benchmark = True
# Fail fast with an explicit error: a bare `assert` is silently stripped
# when Python runs with optimizations (-O), which would let the script
# continue without a GPU and crash later.
if not torch.cuda.is_available():
    raise RuntimeError('CUDA is required for this training script, but no GPU is available')

parser = argparse.ArgumentParser(description='PyTorch Image Classification Training')
parser.add_argument('config', default='configs/res18_mag.yaml', type=str, nargs='?', help='config file path')
parser.add_argument('--resume', type=str, help='ckpt file path')
parser.add_argument('--debug', action='store_true', help='use 10 step each epoch for debug')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')
parser.add_argument('--gpus', default="0,1,3", type=str, help='GPUs id to use. separated by ,')
parser.add_argument('--print-freq', default=100, type=int, help='print frequency')
parser.add_argument('--name', default='exp_res18_mag', type=str, help='experiment name')

# Mutable global state shared by main/train/validate/save_checkpoint.
gvar = edict({
    'args': None,       # YAML config (edict), set in main()
    'cmd_args': None,   # parsed CLI args (edict), set in main()

    'saver': None,      # Saver managing the experiment save dir
    'best_acc1': None,  # best top-1 validation accuracy so far
    'epoch': None,      # current epoch index
    'cur_iter': None,   # global training iteration counter
    'tb_writer': None   # TensorBoard SummaryWriter
})



def get_prune_ratio(cur_epoch, start_epoch, end_epoch, start_ratio, end_ratio):
    """Linearly anneal the prune ratio over [start_epoch, end_epoch].

    Returns start_ratio before the schedule begins, end_ratio once it has
    finished, and the linear interpolation in between.
    """
    if cur_epoch < start_epoch:
        return start_ratio
    if cur_epoch >= end_epoch:
        return end_ratio
    progress = (cur_epoch - start_epoch) / (end_epoch - start_epoch)
    return start_ratio + progress * (end_ratio - start_ratio)

def main():
    """Entry point: parse CLI args and YAML config, build data/model/optimizer,
    optionally resume, then run the train / validate / checkpoint loop
    (or a single validation pass with -e/--evaluate)."""
    cmd_args = parser.parse_args()
    cmd_args = edict(cmd_args.__dict__)
    # "0,1,3" -> [0, 1, 3]
    cmd_args.gpus = [int(x) for x in cmd_args.gpus.split(',')]

    # NOTE(review): yaml.Loader can construct arbitrary Python objects;
    # prefer yaml.safe_load if config files are not fully trusted.
    args = yaml.load(open(cmd_args.config), yaml.Loader)
    args = edict(args)
    

    # Experiment directory, file logging, and TensorBoard all live under
    # the saver's save_dir.
    gvar.saver = Saver(cmd_args.name)
    set_logger(gvar.saver.save_dir / 'log.txt')
    logging.info("cmd_args: " + json.dumps(cmd_args, indent=4, sort_keys=True))
    logging.info("args: " + json.dumps(args, indent=4, sort_keys=True))

    tb_log_dir = gvar.saver.save_dir / 'tb_logs'
    tb_log_dir.mkdir()
    gvar.tb_writer = SummaryWriter(str(tb_log_dir))

    gvar.args = args
    gvar.cmd_args = cmd_args
    if args.seed is not None:
        set_seeds(args.seed)

    print("Use GPU: {} for training".format(cmd_args.gpus))

    # Data loading code
    train_dataset = build_dataset(args.dataset, 'train')
    val_dataset = build_dataset(args.dataset, 'val')
    train_loader = torch.utils.data.DataLoader(
        train_dataset,
        batch_size=args.train.batch_size,
        shuffle=True,
        num_workers=args.train.workers,
        pin_memory=True
    )

    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.train.batch_size,
        shuffle=False,
        num_workers=args.train.workers,
        pin_memory=True
    )


    # create model
    model = build_model(args.model)

    torch.cuda.set_device(cmd_args.gpus[0])
    model = model.cuda()
    if len(cmd_args.gpus) > 1:
        # NOTE(review): model is already on the GPU here; the extra .cuda()
        # is redundant but harmless.
        model = torch.nn.DataParallel(model.cuda(), device_ids=cmd_args.gpus)

    # Swap eligible layers for their prunable (masked) counterparts in place.
    replace_prune_module(model, mapping)

    # define loss function (criterion) and optimizer
    criterion = build_loss(args.loss)

    optimizer = build_optimizer(args.train.optimizer, model.parameters())
    lr_scheduler = build_lr_scheduler(args.train.lr_scheduler, optimizer)

    # optionally resume from a checkpoint
    if cmd_args.resume is not None:
        logging.info(f'resuming from ckpt: {cmd_args.resume}')
        gvar.best_acc1, gvar.epoch, gvar.cur_iter = \
            resume_from_ckpt(model, optimizer, cmd_args.resume, device=torch.device(f'cuda:{cmd_args.gpus[0]}'))
        # Re-align the LR scheduler with the resumed epoch counter.
        lr_scheduler.last_epoch = gvar.epoch
        logging.info(f'best_acc1: {gvar.best_acc1}, epoch: {gvar.epoch}, cur_iter: {gvar.cur_iter}')
    else:
        gvar.best_acc1 = 0
        gvar.cur_iter = 0
        gvar.epoch = 0

    # Evaluation-only mode: one validation pass, then exit.
    if cmd_args.evaluate:
        validate(val_loader, model, criterion)
        return

    for epoch in range(gvar.epoch, args.train.epoch):
        gvar.epoch = epoch
        # train for one epoch
        train(train_loader, model, criterion, optimizer)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion)
        is_best = acc1 > gvar.best_acc1
        gvar.best_acc1 = max(acc1, gvar.best_acc1)

        # DataParallel wraps the real model in .module; unwrap so the
        # checkpoint also loads into a single-GPU model.
        if isinstance(model, torch.nn.DataParallel):
            state_dict = model.module.state_dict()
        else:
            state_dict = model.state_dict()
        save_checkpoint({
            'epoch': epoch + 1,
            'cur_iter': gvar.cur_iter,
            'args': args,
            'state_dict': state_dict,
            'best_acc1': gvar.best_acc1,
            'optimizer' : optimizer.state_dict(),
        }, is_best)
        lr_scheduler.step()
        # Log the per-epoch learning rate to TensorBoard (epoch as x-axis).
        tb_write_scalars({
            'lr': lr_scheduler.get_last_lr()[0]
        }, True)

def train(train_loader, model, criterion, optimizer):
    """Train `model` for one epoch over `train_loader`.

    Refreshes the global prune masks every 100 iterations following the
    epoch-based schedule in gvar.args.prune, records loss/accuracy meters,
    and writes per-iteration scalars to TensorBoard. Increments
    gvar.cur_iter once per mini-batch."""
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(gvar.epoch))

    args = gvar.args
    iterPerEpoch = len(train_loader)

    # switch to train mode
    model.train()

    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # NOTE(review): this breaks after i == 11, i.e. 11 mini-batches,
        # not the 10 promised by the --debug help text.
        if gvar.cmd_args.debug and i > 10:
            logging.warning('debug is enable')
            break
        # measure data loading time
        _data_time = time.time() - end
        data_time.update(_data_time)

        # Update prune masks every 100 iterations. The schedule is written
        # in epochs in the config, so scale it into iterations here.
        if gvar.cur_iter % 100 == 0:
            _r = get_prune_ratio(
                gvar.cur_iter,
                args.prune.start_epoch * iterPerEpoch,
                args.prune.end_epoch * iterPerEpoch,
                args.prune.start_ratio,
                args.prune.end_ratio
            )
            update_thres(model, _r)

        images = images.cuda(non_blocking=True)
        target = target.cuda(non_blocking=True)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        # Optional L2 gradient clipping, enabled by train.grad_norm > 0.
        if 'grad_norm' in gvar.args.train and gvar.args.train.grad_norm > 0:
            torch.nn.utils.clip_grad_norm_(
                model.parameters(),
                max_norm=gvar.args.train.grad_norm,
                norm_type=2.0)
        optimizer.step()

        # measure elapsed time
        _batch_time = time.time() - end
        batch_time.update(_batch_time)
        end = time.time()

        # Per-iteration TensorBoard scalars, every 20 steps.
        if i % 20 == 0:
            tb_write_scalars({
                'Times/data_time': _data_time,
                'Times/batch_time': _batch_time,
                'Accuracy/top1_train': acc1.item(),
                'Accuracy/top5_train': acc5.item(),
                'Loss/loss_train': loss.item(),
                'sparsity': get_sparsity(model),
            })

        if i % gvar.cmd_args.print_freq == 0 and i > 0:
            progress.display(i)
        gvar.cur_iter += 1


def validate(val_loader, model, criterion):
    """Evaluate `model` on `val_loader` in eval mode with gradients disabled.

    Logs a summary line (including current sparsity), writes epoch-level
    scalars to TensorBoard, and returns the top-1 accuracy average."""
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            # NOTE(review): like train(), this runs 11 batches in debug mode.
            if gvar.cmd_args.debug and i > 10:
                logging.warning('debug is enable')
                break
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % gvar.cmd_args.print_freq == 0 and i > 0:
                progress.display(i)
    
    logging.info(f'Test Epoch {gvar.epoch} Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f} Loss {losses.avg:.6f} sparsity {get_sparsity(model):.3f}')
    # Epoch-level scalars (use_epoch=True -> x-axis is the epoch counter).
    tb_write_scalars({
        'Accuracy/top1_eval': top1.avg,
        'Accuracy/top5_eval': top5.avg,
        'Loss/loss_eval': losses.avg,
    }, True)

    return top1.avg


def save_checkpoint(state, is_best):
    """Persist `state` as last.pt in the experiment dir; when `is_best`,
    also copy it to best.pt."""
    last_path = gvar.saver.save_dir / "last.pt"
    torch.save(state, last_path)
    if is_best:
        best_path = gvar.saver.save_dir / 'best.pt'
        shutil.copyfile(last_path, str(best_path))


def tb_write_scalars(data, use_epoch=False):
    """Write each tag -> value pair in `data` to TensorBoard.

    The x-axis is gvar.epoch when use_epoch is True, otherwise the global
    iteration counter gvar.cur_iter."""
    if use_epoch:
        step = gvar.epoch
    else:
        step = gvar.cur_iter
    writer = gvar.tb_writer
    for tag, value in data.items():
        writer.add_scalar(tag, value, step)


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        maxk = max(topk)
        batch_size = target.size(0)

        # One top-maxk pass covers every requested k.
        _, pred = output.topk(maxk, dim=1, largest=True, sorted=True)
        # (batch, maxk) -> (maxk, batch), compared row-wise against target.
        hits = pred.t().eq(target.reshape(1, -1).expand(maxk, batch_size))

        results = []
        for k in topk:
            n_correct = hits[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(n_correct * (100.0 / batch_size))
        return results


def update_thres(model, ratio):
    """Recompute every prune mask so that globally ~`ratio` of all maskable
    weights are zeroed by magnitude (|w|) pruning.

    The fc layer is pruned at a gentler rate (0.9 * ratio); the budget it
    leaves unused is redistributed onto modules whose name contains
    'layer'. Maskable modules matching neither 'layer' nor 'fc' are reset
    to an all-ones (dense) mask."""
    all_mask = []
    n_fc = -1
    for name, mod in model.named_modules():
        if type(mod) in mapping.values():
            all_mask.append(mod.mask.reshape([-1]))
        # NOTE(review): this branch assumes every module whose name contains
        # 'fc' carries a .mask attribute; if a plain (non-prune) module
        # matches, this raises AttributeError — confirm against the model.
        if 'fc' in name:
            n_fc = np.prod(mod.mask.shape)
    assert n_fc > 0
    
    # Split the global budget: fc gets fc_ratio, the rest is spread over
    # the remaining (N - n_fc) weights.
    fc_ratio = ratio * 0.9
    N = torch.cat(all_mask).shape[0]       # total maskable weights
    n_prune = N * ratio                    # global number of weights to prune
    n_fc_prune = n_fc * fc_ratio           # share assigned to the fc layer
    n_prune_layers = n_prune - n_fc_prune  # remainder for the 'layer*' modules
    layers_ratio = n_prune_layers / (N - n_fc)
    
    for name, mod in model.named_modules():
        if type(mod) in mapping.values():
            if 'layer' in name:
                k = int(layers_ratio * np.prod(mod.mask.shape))
            elif 'fc' in name:
                k = int(fc_ratio * np.prod(mod.mask.shape))
            else:
                k = 0  # e.g. stem conv: kept fully dense
            
            if k == 0:
                with torch.no_grad():
                    mod.mask.copy_(torch.ones_like(mod.weight))
            else:
                # Threshold at the k-th smallest |w|; the strict '>' also
                # zeroes any weights tied with that threshold value.
                w_abs = mod.weight.abs()
                v, idx =  torch.topk(w_abs.reshape([-1]), k, largest=False, sorted=True)
                v = v[-1].item()
                with torch.no_grad():
                    mod.mask.copy_((w_abs>v).float())

def get_sparsity(model, pattern='.*'):
    """Return the fraction of pruned (masked-to-zero) weights in `model`.

    Args:
        model: network whose prune-wrapped modules (types in `mapping`)
            are inspected.
        pattern: regex; only modules whose name matches are counted.

    Returns:
        float in [0, 1]: zeroed weights / total weights over the matching
        modules, or 0.0 when no module matches.
    """
    n_total = 0
    # Fix: the original accumulator was named NZ and commented "nonzero",
    # but it counts entries whose mask IS zero — i.e. the pruned weights.
    n_zero = 0
    for name, mod in model.named_modules():
        if type(mod) in mapping.values() and re.search(pattern, name):
            n_total += np.prod(mod.weight.shape)
            n_zero += (mod.mask.abs() == 0).float().sum().item()
    # Guard against ZeroDivisionError when the pattern matches nothing.
    if n_total == 0:
        return 0.0
    return n_zero / n_total

# Script entry point: only run training when executed directly, not on import.
if __name__ == '__main__':
    main()