import sys
sys.path.append('./')

import argparse
import shutil
import time
import yaml
import json
from easydict import EasyDict as edict
import logging

import torch
import torch.nn as nn
import torch.backends.cudnn as cudnn
from torch.utils.tensorboard import SummaryWriter

from utils import *
from utils.train_utils import accuracy

# Enable cuDNN autotuning: picks the fastest convolution algorithms for the
# input sizes seen during evaluation (safe here since eval batches are fixed-size).
cudnn.benchmark = True
# Fail fast if no GPU is visible -- the rest of the script calls .cuda()
# unconditionally. An explicit raise (unlike `assert`) survives `python -O`.
if not torch.cuda.is_available():
    raise RuntimeError('CUDA is required but torch.cuda.is_available() returned False')

parser = argparse.ArgumentParser(description='PyTorch Image Classification Training')
parser.add_argument('config', default='configs/res18.yaml', type=str, nargs='?', help='config file path')
parser.add_argument('--resume', type=str, help='ckpt file path')
parser.add_argument('--gpus', default="0,1,3", type=str, help='GPUs id to use. separated by ,')

# Module-level shared state, filled in by main(): `args` holds the YAML config,
# `cmd_args` holds the parsed command line.
gvar = edict({
    'args': None,
    'cmd_args': None,
})

def main():
    """Entry point: parse CLI args + YAML config, build the validation
    loader and model, optionally resume from a checkpoint, and run one
    validation pass over the dataset."""
    cmd_args = parser.parse_args()
    cmd_args = edict(cmd_args.__dict__)
    # "0,1,3" -> [0, 1, 3]
    cmd_args.gpus = [int(x) for x in cmd_args.gpus.split(',')]

    # Use a context manager so the config file handle is closed deterministically
    # (the original `yaml.load(open(...))` leaked it).
    # NOTE(review): yaml.Loader can construct arbitrary Python objects; fine for
    # trusted local configs, but never point this at untrusted input.
    with open(cmd_args.config) as f:
        args = edict(yaml.load(f, yaml.Loader))

    set_logger()
    logging.info("cmd_args: " + json.dumps(cmd_args, indent=4, sort_keys=True))
    logging.info("args: " + json.dumps(args, indent=4, sort_keys=True))

    # Publish to module-level globals so other helpers can read the config.
    gvar.args = args
    gvar.cmd_args = cmd_args
    # .get() tolerates configs that omit the `seed` key entirely.
    if args.get('seed') is not None:
        set_seeds(args.seed)

    print("Use GPU: {}".format(cmd_args.gpus))

    # Data loading: validation split only -- this script just evaluates.
    val_dataset = build_dataset(args.dataset, 'val')
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.train.batch_size,  # reuses the training batch size
        shuffle=False,
        num_workers=args.train.workers,
        pin_memory=True,
    )

    # Model on the first requested GPU; wrap in DataParallel only when more
    # than one GPU was requested.
    model = build_model(args.model)
    torch.cuda.set_device(cmd_args.gpus[0])
    model = model.cuda()
    if len(cmd_args.gpus) > 1:
        # model is already on the GPU; the extra .cuda() in the original was redundant
        model = torch.nn.DataParallel(model, device_ids=cmd_args.gpus)

    # Loss used only for the reported validation loss metric.
    criterion = nn.CrossEntropyLoss().cuda()

    # Restore weights; the optimizer slot is None because we only evaluate.
    logging.info(f'resuming from ckpt: {cmd_args.resume}')
    resume_from_ckpt(model, None, cmd_args.resume,
                     device=torch.device(f'cuda:{cmd_args.gpus[0]}'))

    validate(val_loader, model, criterion)
    

def validate(val_loader, model, criterion):
    """Run one full pass over `val_loader` and return the top-1 accuracy.

    Args:
        val_loader: iterable of (images, target) batches.
        model: network to evaluate; assumed already moved to GPU by the caller.
        criterion: loss function (e.g. CrossEntropyLoss) for the reported loss.

    Returns:
        Average top-1 accuracy over the whole validation set.
    """
    # NOTE(review): the original also built a ProgressMeter and a batch-time
    # AverageMeter that were updated every batch but never displayed or
    # reported anywhere -- dead code, removed.
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')

    # Evaluate mode: disables dropout and freezes batch-norm statistics.
    model.eval()

    with torch.no_grad():
        for images, target in val_loader:
            images = images.cuda(non_blocking=True)
            target = target.cuda(non_blocking=True)

            output = model(images)
            loss = criterion(output, target)

            # Per-batch metrics, weighted by batch size so the running
            # averages stay correct even for a ragged final batch.
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

    logging.info(f'Test Acc@1 {top1.avg:.3f} Acc@5 {top5.avg:.3f} Loss {losses.avg:.6f}')

    return top1.avg


# Script entry point: run the evaluation only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()