import os
import logging
from tqdm import tqdm
import torch
from torch.utils import data
import torchvision.transforms as transform

import sys
sys.path.insert(0, '../../')

from option import Options
from MainNet.models import get_classification_model
from MainNet.datasets import get_classification_dataset
from MainNet import utils


def test(args):
    """Run one validation pass of a classification model from a checkpoint.

    Builds the 'val' split dataset/loader, restores model weights from
    ``args.resume``, and logs the top-1 accuracy over the whole split.

    Args:
        args: parsed command-line options (see ``Options``); must provide the
            dataset/model settings read below plus ``resume`` (checkpoint
            path), ``split``, ``workers``, ``cuda`` and ``data_folder``.

    Raises:
        RuntimeError: if ``args.resume`` is None or not an existing file.
    """
    logger, console, output_dir = utils.file.create_logger(args, 'val')

    device = 'cuda:{}'.format(0) if torch.cuda.is_available() else 'cpu'
    logger.info('Compute device: ' + device)
    device = torch.device(device)

    # data transforms
    input_transform = transform.Compose([
        transform.ToTensor(),
        transform.Normalize([.5, .5, .5], [.5, .5, .5])])

    # dataset
    data_kwargs = {'logger': logger, 'transform': input_transform,
                   'base_size': args.base_size, 'crop_size': args.crop_size,
                   'crop_vid': args.crop_vid, 'split': args.split,
                   'root': args.data_folder}
    testset = get_classification_dataset(args.dataset, mode='val', **data_kwargs)

    # dataloader; shuffle=False: sample order cannot affect the aggregate
    # accuracy, and a fixed order keeps the running progress reproducible
    loader_kwargs = {'num_workers': args.workers, 'pin_memory': True} \
        if args.cuda else {}
    testloader = data.DataLoader(testset, batch_size=1,
                                 drop_last=False, shuffle=False, **loader_kwargs)

    # model
    model_kwargs = {'backbone': args.backbone, 'version': args.version} \
        if args.model == 'mixresnet' else {}
    model = get_classification_model(args.model, **model_kwargs)

    # resuming checkpoint; map_location='cpu' lets a CUDA-saved checkpoint
    # load on a CPU-only machine (weights are moved to `device` below)
    if args.resume is None or not os.path.isfile(args.resume):
        raise RuntimeError("=> no checkpoint found at '{}'".format(args.resume))
    checkpoint = torch.load(args.resume, map_location='cpu')
    model.load_state_dict(checkpoint['state_dict'])

    logger.info("=> loaded checkpoint '{}' (epoch {})".format(args.resume,
                                                              checkpoint['epoch']))

    # count parameter number
    pytorch_total_params = sum(p.numel() for p in model.parameters())
    logger.info("Total number of parameters: %d" % pytorch_total_params)

    # don't output to stdout anymore when logging
    logging.getLogger('').removeHandler(console)

    # validation
    model.to(device)
    model.eval()
    top1 = utils.AverageMeter('acc@1', ':6.2f')

    tbar = tqdm(testloader, desc='\r')
    # no_grad hoisted around the whole loop: gradients are never needed here
    with torch.no_grad():
        for i, (video, target) in enumerate(tbar):
            video = video.to(device)
            target = target.to(device)
            pred = model(video)
            # only top-1 is consumed, so request a single k (the original
            # redundantly asked for topk=(1, 1))
            acc1 = utils.accuracy(pred, target, topk=(1,))
            top1.update(acc1[0], 1)
            tbar.set_description(
                'acc1: %.3f' % top1.avg)

    logger.info('acc1: %.3f' % top1.avg)


if __name__ == "__main__":
    # Parse CLI options, fix the RNG seed for reproducibility, then validate.
    cli_args = Options().parse()
    torch.manual_seed(cli_args.seed)
    test(cli_args)
