import argparse
import os
import random
import shutil
import time
import warnings
from enum import Enum
from torch.utils.tensorboard import SummaryWriter
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.distributed as dist
import torch.optim
from torch.optim.lr_scheduler import StepLR
import torch.multiprocessing as mp
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import torchvision.models as models
import matplotlib.pyplot as plt
import numpy as np

# Mapping from the Tiny ImageNet (200-class) integer class index to the
# human-readable WordNet synset name(s) for that class. Used when writing
# prediction reports and titling plotted images in test().
labels_map = {0: 'goldfish, Carassius auratus', 1: 'European fire salamander, Salamandra salamandra',
              2: 'bullfrog, Rana catesbeiana', 3: 'tailed frog, bell toad, ribbed toad, tailed toad, Ascaphus trui',
              4: 'American alligator, Alligator mississipiensis', 5: 'boa constrictor, Constrictor constrictor',
              6: 'trilobite', 7: 'scorpion', 8: 'black widow, Latrodectus mactans', 9: 'tarantula', 10: 'centipede',
              11: 'goose', 12: 'koala, koala bear, kangaroo bear, native bear, Phascolarctos cinereus', 13: 'jellyfish',
              14: 'brain coral', 15: 'snail', 16: 'slug', 17: 'sea slug, nudibranch',
              18: 'American lobster, Northern lobster, Maine lobster, Homarus americanus',
              19: 'spiny lobster, langouste, rock lobster, crawfish, crayfish, sea crawfish',
              20: 'black stork, Ciconia nigra', 21: 'king penguin, Aptenodytes patagonica', 22: 'albatross, mollymawk',
              23: 'dugong, Dugong dugon', 24: 'Chihuahua', 25: 'Yorkshire terrier', 26: 'golden retriever',
              27: 'Labrador retriever', 28: 'German shepherd, German shepherd dog, German police dog, alsatian',
              29: 'standard poodle', 30: 'tabby, tabby cat', 31: 'Persian cat', 32: 'Egyptian cat',
              33: 'cougar, puma, catamount, mountain lion, painter, panther, Felis concolor',
              34: 'lion, king of beasts, Panthera leo', 35: 'brown bear, bruin, Ursus arctos',
              36: 'ladybug, ladybeetle, lady beetle, ladybird, ladybird beetle', 37: 'fly', 38: 'bee',
              39: 'grasshopper, hopper', 40: 'walking stick, walkingstick, stick insect', 41: 'cockroach, roach',
              42: 'mantis, mantid',
              43: "dragonfly, darning needle, devil's darning needle, sewing needle, snake feeder, snake doctor, mosquito hawk, skeeter hawk",
              44: 'monarch, monarch butterfly, milkweed butterfly, Danaus plexippus',
              45: 'sulphur butterfly, sulfur butterfly', 46: 'sea cucumber, holothurian',
              47: 'guinea pig, Cavia cobaya', 48: 'hog, pig, grunter, squealer, Sus scrofa', 49: 'ox', 50: 'bison',
              51: 'bighorn, bighorn sheep, cimarron, Rocky Mountain bighorn, Rocky Mountain sheep, Ovis canadensis',
              52: 'gazelle', 53: 'Arabian camel, dromedary, Camelus dromedarius',
              54: 'orangutan, orang, orangutang, Pongo pygmaeus', 55: 'chimpanzee, chimp, Pan troglodytes',
              56: 'baboon', 57: 'African elephant, Loxodonta africana',
              58: 'lesser panda, red panda, panda, bear cat, cat bear, Ailurus fulgens', 59: 'abacus',
              60: "academic gown, academic robe, judge's robe", 61: 'altar', 62: 'apron',
              63: 'backpack, back pack, knapsack, packsack, rucksack, haversack',
              64: 'bannister, banister, balustrade, balusters, handrail', 65: 'barbershop', 66: 'barn',
              67: 'barrel, cask', 68: 'basketball', 69: 'bathtub, bathing tub, bath, tub',
              70: 'beach wagon, station wagon, wagon, estate car, beach waggon, station waggon, waggon',
              71: 'beacon, lighthouse, beacon light, pharos', 72: 'beaker', 73: 'beer bottle', 74: 'bikini, two-piece',
              75: 'binoculars, field glasses, opera glasses', 76: 'birdhouse', 77: 'bow tie, bow-tie, bowtie',
              78: 'brass, memorial tablet, plaque', 79: 'broom', 80: 'bucket, pail', 81: 'bullet train, bullet',
              82: 'butcher shop, meat market', 83: 'candle, taper, wax light', 84: 'cannon', 85: 'cardigan',
              86: 'cash machine, cash dispenser, automated teller machine, automatic teller machine, automated teller, automatic teller, ATM',
              87: 'CD player', 88: 'chain', 89: 'chest', 90: 'Christmas stocking', 91: 'cliff dwelling',
              92: 'computer keyboard, keypad', 93: 'confectionery, confectionary, candy store', 94: 'convertible',
              95: 'crane', 96: 'dam, dike, dyke', 97: 'desk', 98: 'dining table, board', 99: 'drumstick',
              100: 'dumbbell', 101: 'flagpole, flagstaff', 102: 'fountain', 103: 'freight car',
              104: 'frying pan, frypan, skillet', 105: 'fur coat', 106: 'gasmask, respirator, gas helmet',
              107: 'go-kart', 108: 'gondola', 109: 'hourglass', 110: 'iPod', 111: 'jinrikisha, ricksha, rickshaw',
              112: 'kimono', 113: 'lampshade, lamp shade', 114: 'lawn mower, mower', 115: 'lifeboat',
              116: 'limousine, limo', 117: 'magnetic compass', 118: 'maypole', 119: 'military uniform',
              120: 'miniskirt, mini', 121: 'moving van', 122: 'nail', 123: 'neck brace', 124: 'obelisk',
              125: 'oboe, hautboy, hautbois', 126: 'organ, pipe organ', 127: 'parking meter',
              128: 'pay-phone, pay-station', 129: 'picket fence, paling', 130: 'pill bottle',
              131: "plunger, plumber's helper", 132: 'pole',
              133: 'police van, police wagon, paddy wagon, patrol wagon, wagon, black Maria', 134: 'poncho',
              135: 'pop bottle, soda bottle', 136: "potter's wheel", 137: 'projectile, missile',
              138: 'punching bag, punch bag, punching ball, punchball', 139: 'reel', 140: 'refrigerator, icebox',
              141: 'remote control, remote', 142: 'rocking chair, rocker', 143: 'rugby ball', 144: 'sandal',
              145: 'school bus', 146: 'scoreboard', 147: 'sewing machine', 148: 'snorkel', 149: 'sock', 150: 'sombrero',
              151: 'space heater', 152: "spider web, spider's web", 153: 'sports car, sport car',
              154: 'steel arch bridge', 155: 'stopwatch, stop watch', 156: 'sunglasses, dark glasses, shades',
              157: 'suspension bridge', 158: 'swimming trunks, bathing trunks', 159: 'syringe', 160: 'teapot',
              161: 'teddy, teddy bear', 162: 'thatch, thatched roof', 163: 'torch', 164: 'tractor',
              165: 'triumphal arch', 166: 'trolleybus, trolley coach, trackless trolley', 167: 'turnstile',
              168: 'umbrella', 169: 'vestment', 170: 'viaduct', 171: 'volleyball', 172: 'water jug', 173: 'water tower',
              174: 'wok', 175: 'wooden spoon', 176: 'comic book', 177: 'plate', 178: 'guacamole',
              179: 'ice cream, icecream', 180: 'ice lolly, lolly, lollipop, popsicle', 181: 'pretzel',
              182: 'mashed potato', 183: 'cauliflower', 184: 'bell pepper', 185: 'mushroom', 186: 'orange',
              187: 'lemon', 188: 'banana', 189: 'pomegranate', 190: 'meat loaf, meatloaf', 191: 'pizza, pizza pie',
              192: 'potpie', 193: 'espresso', 194: 'alp', 195: 'cliff, drop, drop-off', 196: 'coral reef',
              197: 'lakeside, lakeshore', 198: 'seashore, coast, seacoast, sea-coast', 199: 'acorn'}

# All lowercase, callable entries of torchvision.models are valid architectures.
model_names = sorted(name for name in models.__dict__
                     if name.islower() and not name.startswith("__")
                     and callable(models.__dict__[name]))

parser = argparse.ArgumentParser(description='PyTorch ImageNet Training')
# nargs='?' makes the positional optional so the declared default is actually
# used (argparse ignores default= on a required positional).
parser.add_argument('data', metavar='DIR', nargs='?', default='tiny-imagenet-200',
                    help='path to dataset (default: tiny-imagenet-200)')
parser.add_argument('-a', '--arch', metavar='ARCH', default='resnet18',
                    choices=model_names,
                    help='model architecture: ' +
                         ' | '.join(model_names) +
                         ' (default: resnet18)')
parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
parser.add_argument('--epochs', default=20, type=int, metavar='N',
                    help='number of total epochs to run')
parser.add_argument('--start-epoch', default=0, type=int, metavar='N',
                    help='manual epoch number (useful on restarts)')
parser.add_argument('-b', '--batch-size', default=200, type=int,
                    metavar='N',
                    help='mini-batch size (default: 200), this is the total '
                         'batch size of all GPUs on the current node when '
                         'using Data Parallel or Distributed Data Parallel')
parser.add_argument('--lr', '--learning-rate', default=0.1, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
parser.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
parser.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
parser.add_argument('-p', '--print-freq', default=10, type=int,
                    metavar='N', help='print frequency (default: 10)')
parser.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')
parser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
parser.add_argument('--pretrained', dest='pretrained', action='store_true',
                    help='use pre-trained model')
parser.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
parser.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
parser.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
parser.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
parser.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
parser.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
parser.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')
parser.add_argument('--test', dest='test', action='store_true',
                    help='test model on validation set')

# Best top-1 validation accuracy seen so far (updated in main_worker).
best_acc1 = 0


def main():
    """Parse command-line arguments and launch one or more training workers."""
    args = parser.parse_args()

    if args.seed is not None:
        # Deterministic mode: fix RNGs and force deterministic cuDNN kernels.
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if args.gpu is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    if args.world_size == -1 and args.dist_url == "env://":
        args.world_size = int(os.environ["WORLD_SIZE"])

    args.distributed = args.multiprocessing_distributed or args.world_size > 1

    gpu_count = torch.cuda.device_count()
    if not args.multiprocessing_distributed:
        # Single process: run the worker directly in this process.
        main_worker(args.gpu, gpu_count, args)
    else:
        # One spawned process per GPU on this node; the total world_size
        # must therefore be scaled by the per-node GPU count.
        args.world_size = gpu_count * args.world_size
        mp.spawn(main_worker, nprocs=gpu_count, args=(gpu_count, args))


def main_worker(gpu, ngpus_per_node, args):
    """Set up one training worker (model, data, optimizer, optional
    distributed wrapping) and run the train/validate loop over all epochs.

    Args:
        gpu: GPU index assigned to this worker, or None for CPU/DataParallel.
        ngpus_per_node: number of GPUs on this node (used to split batch
            size/workers and to compute the global rank).
        args: parsed command-line arguments (mutated in place: gpu, rank,
            batch_size, workers, start_epoch, distributed flags).
    """
    global best_acc1
    args.gpu = gpu
    if args.gpu is not None:
        print("Use GPU: {} for training".format(args.gpu))

    if args.distributed:
        if args.dist_url == "env://" and args.rank == -1:
            args.rank = int(os.environ["RANK"])
        if args.multiprocessing_distributed:
            # For multiprocessing distributed training, rank needs to be the
            # global rank among all the processes
            args.rank = args.rank * ngpus_per_node + gpu
        dist.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                world_size=args.world_size, rank=args.rank)
    # create model
    if args.pretrained:
        print("=> using pre-trained model '{}'".format(args.arch))
        model = models.__dict__[args.arch](pretrained=True)
    else:
        print("=> creating model '{}'".format(args.arch))
        model = models.__dict__[args.arch]()

    if not torch.cuda.is_available():
        print('using CPU, this will be slow')
    elif args.distributed:
        # For multiprocessing distributed, DistributedDataParallel constructor
        # should always set the single device scope, otherwise,
        # DistributedDataParallel will use all available devices.
        if args.gpu is not None:
            torch.cuda.set_device(args.gpu)
            model.cuda(args.gpu)
            # When using a single GPU per process and per
            # DistributedDataParallel, we need to divide the batch size
            # ourselves based on the total number of GPUs of the current node.
            args.batch_size = int(args.batch_size / ngpus_per_node)
            args.workers = int((args.workers + ngpus_per_node - 1) / ngpus_per_node)
            model = torch.nn.parallel.DistributedDataParallel(model, device_ids=[args.gpu])
        else:
            model.cuda()
            # DistributedDataParallel will divide and allocate batch_size to all
            # available GPUs if device_ids are not set
            model = torch.nn.parallel.DistributedDataParallel(model)
    elif args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        # DataParallel will divide and allocate batch_size to all available GPUs
        if args.arch.startswith('alexnet') or args.arch.startswith('vgg'):
            model.features = torch.nn.DataParallel(model.features)
            model.cuda()
        else:
            model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion), optimizer, and learning rate scheduler
    criterion = nn.CrossEntropyLoss().cuda(args.gpu)

    optimizer = torch.optim.SGD(model.parameters(), args.lr,
                                momentum=args.momentum,
                                weight_decay=args.weight_decay)

    """Sets the learning rate to the initial LR decayed by 10 every 30 epochs"""
    scheduler = StepLR(optimizer, step_size=30, gamma=0.1)

    # optionally resume from a checkpoint
    if args.resume:
        if os.path.isfile(args.resume):
            print("=> loading checkpoint '{}'".format(args.resume))
            if args.gpu is None:
                checkpoint = torch.load(args.resume)
            else:
                # Map model to be loaded to specified single gpu.
                loc = 'cuda:{}'.format(args.gpu)
                checkpoint = torch.load(args.resume, map_location=loc)
            args.start_epoch = checkpoint['epoch']
            best_acc1 = checkpoint['best_acc1']
            if args.gpu is not None:
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(args.gpu)
            model.load_state_dict(checkpoint['state_dict'])
            optimizer.load_state_dict(checkpoint['optimizer'])
            scheduler.load_state_dict(checkpoint['scheduler'])
            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(args.resume, checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(args.resume))

    # Let cuDNN benchmark and pick the fastest kernels for the fixed input size.
    cudnn.benchmark = True

    # TensorBoard logging destination for this run.
    writer = SummaryWriter('runs/tiny_imagenet_experiment1')
    # Data loading code
    traindir = os.path.join(args.data, 'train')
    valdir = os.path.join(args.data, 'val')
    # Standard ImageNet channel statistics used for input normalization.
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))
    if args.distributed:
        train_sampler = torch.utils.data.distributed.DistributedSampler(train_dataset)
    else:
        train_sampler = None
    val_dataset = datasets.ImageFolder(
        valdir,
        transforms.Compose([
            transforms.ToTensor(),
            normalize,
        ]))
    # NOTE(review): disabled debug code below visualizes random validation
    # samples; kept as-is (it is a no-op string statement, not executed).
    '''
    while True:
        figure = plt.figure(figsize=(20, 8))
        for i in range(1, 4 * 4 + 1):
            sample_idx = torch.randint(len(val_dataset), size=(1,)).item()
            img, label = val_dataset[sample_idx]
            figure.add_subplot(4, 4, i)
            name=str(labels_map[label])+'\t'+str(sample_idx)
            plt.title(name)
            plt.axis("off")
            img = img.numpy()
            img = np.transpose(img, (1, 2, 0))
            plt.imshow(img)
        plt.show()
    exit()
    '''
    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=args.batch_size, shuffle=(train_sampler is None),
        num_workers=args.workers, pin_memory=True, sampler=train_sampler)
    # NOTE(review): disabled debug code below would log the model graph to
    # TensorBoard; kept as-is (no-op string statement).
    '''
    for (images, target) in train_loader:
        writer.add_graph(model, images)
        writer.flush()
        break
    '''
    val_loader = torch.utils.data.DataLoader(
        val_dataset,
        batch_size=args.batch_size, shuffle=False,
        num_workers=args.workers, pin_memory=True)

    # Evaluation-only mode: run a single validation pass and exit.
    if args.evaluate:
        validate(val_loader, model, criterion, args, writer=None, epoch=None)
        return

    # Test mode: dump predictions and visualize batches, then exit.
    if args.test:
        test(val_loader, model, args)
        return

    for epoch in range(args.start_epoch, args.epochs):
        if args.distributed:
            # Re-seed the sampler so each epoch gets a different shard order.
            train_sampler.set_epoch(epoch)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, args, writer)

        # evaluate on validation set
        acc1 = validate(val_loader, model, criterion, args, writer, epoch)

        scheduler.step()

        # remember best acc@1 and save checkpoint
        is_best = acc1 > best_acc1
        best_acc1 = max(acc1, best_acc1)

        # Only rank 0 of each node writes checkpoints to avoid clobbering.
        if not args.multiprocessing_distributed or (args.multiprocessing_distributed
                                                    and args.rank % ngpus_per_node == 0):
            save_checkpoint({
                'epoch': epoch + 1,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_acc1': best_acc1,
                'optimizer': optimizer.state_dict(),
                'scheduler': scheduler.state_dict()
            }, is_best)
    writer.close()


def train(train_loader, model, criterion, optimizer, epoch, args, writer):
    """Run one training epoch over `train_loader`.

    Args:
        train_loader: DataLoader yielding (images, target) batches.
        model: network to train (switched to train mode here).
        criterion: loss function (e.g. CrossEntropyLoss).
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index, used for display and the TensorBoard
            global step.
        args: parsed CLI arguments (uses args.gpu, args.print_freq).
        writer: TensorBoard SummaryWriter, or None to disable logging.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    data_time = AverageMeter('Data', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    top5 = AverageMeter('Acc@5', ':6.2f')
    progress = ProgressMeter(
        len(train_loader),
        [batch_time, data_time, losses, top1, top5],
        prefix="Epoch: [{}]".format(epoch))

    # Hoisted loop invariant for the TensorBoard global-step computation.
    num_batches = len(train_loader)

    # switch to train mode
    model.train()
    end = time.time()
    for i, (images, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        if args.gpu is not None:
            images = images.cuda(args.gpu, non_blocking=True)
        if torch.cuda.is_available():
            target = target.cuda(args.gpu, non_blocking=True)

        # compute output
        output = model(images)
        loss = criterion(output, target)

        # measure accuracy and record loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))
        losses.update(loss.item(), images.size(0))
        top1.update(acc1[0], images.size(0))
        top5.update(acc5[0], images.size(0))

        # Log running metrics every 100 mini-batches. Use `is not None` and
        # plain Python floats (.item()) rather than live tensors so the
        # logger does not hold references into the autograd graph.
        if writer is not None and i % 100 == 0:
            step = epoch * num_batches + i
            writer.add_scalar('training loss', loss.item(), step)
            writer.add_scalar('training acc1', acc1.item(), step)
            writer.add_scalar('training acc5', acc5.item(), step)

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % args.print_freq == 0:
            progress.display(i)


def test(val_loader, model, args):
    """Run the model over the validation set, write per-sample predictions to
    'test1.txt', and show a 4x4 image grid per batch with predicted vs. true
    labels.

    Args:
        val_loader: DataLoader yielding (images, target) batches.
        model: trained network (switched to eval mode here).
        args: parsed CLI arguments (uses args.gpu).
    """
    # Inference-only: freeze batchnorm/dropout and skip autograd bookkeeping.
    model.eval()
    with open('test1.txt', 'w') as f, torch.no_grad():
        for images, target in val_loader:
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)
            preds = torch.argmax(output, dim=1)

            # Use the actual batch size: the original hard-coded range(200)
            # crashed on any batch smaller than 200 (e.g. the final batch).
            batch_size = images.size(0)

            # Write out the predicted vs. actual label for every sample.
            for k in range(batch_size):
                label1 = int(preds[k].item())
                label2 = int(target[k].item())
                f.write('predict:' + labels_map[label1] + '\t' + 'actually:' + labels_map[label2] + '\n')

            # Display up to 16 images of the batch with their labels.
            figure = plt.figure(figsize=(16, 8))
            for k in range(min(16, batch_size)):
                label1 = int(preds[k].item())
                label2 = int(target[k].item())
                figure.add_subplot(4, 4, k + 1)
                name = 'predict:' + labels_map[label1] + '\n' + 'actually:' + labels_map[label2]
                plt.title(name)
                plt.axis("off")
                # .cpu() first: .numpy() raises on CUDA tensors.
                img = images[k].cpu().numpy()
                img = np.transpose(img, (1, 2, 0))
                plt.imshow(img)
            plt.show()


def validate(val_loader, model, criterion, args, writer, epoch):
    """Evaluate the model over the validation set.

    Args:
        val_loader: DataLoader yielding (images, target) batches.
        model: network to evaluate (switched to eval mode here).
        criterion: loss function.
        args: parsed CLI arguments (uses args.gpu, args.print_freq).
        writer: TensorBoard SummaryWriter, or None to disable logging.
        epoch: current epoch index for the TensorBoard global step; may be
            None when writer is None (evaluate-only mode).

    Returns:
        Average top-1 accuracy over the whole validation set.
    """
    batch_time = AverageMeter('Time', ':6.3f', Summary.NONE)
    losses = AverageMeter('Loss', ':.4e', Summary.NONE)
    top1 = AverageMeter('Acc@1', ':6.2f', Summary.AVERAGE)
    top5 = AverageMeter('Acc@5', ':6.2f', Summary.AVERAGE)
    progress = ProgressMeter(
        len(val_loader),
        [batch_time, losses, top1, top5],
        prefix='Test: ')

    # Hoisted loop invariant for the TensorBoard global-step computation.
    num_batches = len(val_loader)

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, target) in enumerate(val_loader):
            if args.gpu is not None:
                images = images.cuda(args.gpu, non_blocking=True)
            if torch.cuda.is_available():
                target = target.cuda(args.gpu, non_blocking=True)

            # compute output
            output = model(images)
            loss = criterion(output, target)

            # measure accuracy and record loss
            acc1, acc5 = accuracy(output, target, topk=(1, 5))
            losses.update(loss.item(), images.size(0))
            top1.update(acc1[0], images.size(0))
            top5.update(acc5[0], images.size(0))

            # Log running metrics every 10 mini-batches. Use `is not None`
            # and plain Python floats (.item()) rather than tensors.
            if writer is not None and i % 10 == 0:
                step = epoch * num_batches + i
                writer.add_scalar('testing loss', loss.item(), step)
                writer.add_scalar('testing acc1', acc1.item(), step)
                writer.add_scalar('testing acc5', acc5.item(), step)

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            if i % args.print_freq == 0:
                progress.display(i)

        progress.display_summary()

    return top1.avg


def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Persist a training checkpoint.

    Args:
        state: dict of serializable training state (epoch, weights, optimizer,
            scheduler, best accuracy, ...).
        is_best: when True, also duplicate the checkpoint as
            'model_best.pth.tar'.
        filename: destination path for the checkpoint file.
    """
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')


class Summary(Enum):
    """Selects how an AverageMeter is rendered in its summary() line."""
    NONE = 0
    AVERAGE = 1
    SUM = 2
    COUNT = 3


class AverageMeter(object):
    """Tracks the latest value and the running average of a metric."""

    def __init__(self, name, fmt=':f', summary_type=Summary.AVERAGE):
        self.name = name
        self.fmt = fmt
        self.summary_type = summary_type
        self.reset()

    def reset(self):
        """Clear all accumulated statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count

    def __str__(self):
        # e.g. "Loss 1.2345e+00 (1.1000e+00)" for fmt=':.4e'
        template = ''.join(['{name} {val', self.fmt, '} ({avg', self.fmt, '})'])
        return template.format(**self.__dict__)

    def summary(self):
        """Render the end-of-run summary according to self.summary_type."""
        templates = {
            Summary.NONE: '',
            Summary.AVERAGE: '{name} {avg:.3f}',
            Summary.SUM: '{name} {sum:.3f}',
            Summary.COUNT: '{name} {count:.3f}',
        }
        if self.summary_type not in templates:
            raise ValueError('invalid summary type %r' % self.summary_type)
        return templates[self.summary_type].format(**self.__dict__)


class ProgressMeter(object):
    """Formats and prints per-batch progress lines for a set of meters."""

    def __init__(self, num_batches, meters, prefix=""):
        self.batch_fmtstr = self._get_batch_fmtstr(num_batches)
        self.meters = meters
        self.prefix = prefix

    def display(self, batch):
        """Print one tab-separated progress line for the given batch index."""
        parts = [self.prefix + self.batch_fmtstr.format(batch)]
        for meter in self.meters:
            parts.append(str(meter))
        print('\t'.join(parts))

    def display_summary(self):
        """Print a single end-of-run summary line across all meters."""
        parts = [" *"] + [meter.summary() for meter in self.meters]
        print(' '.join(parts))

    def _get_batch_fmtstr(self, num_batches):
        # Pad the running batch index to the width of the total, e.g.
        # '[  7/250]' for num_batches=250.
        width = len(str(num_batches // 1))
        fmt = '{:' + str(width) + 'd}'
        return '[' + fmt + '/' + fmt.format(num_batches) + ']'


def accuracy(output, target, topk=(1,)):
    """Computes the accuracy over the k top predictions for the specified values of k"""
    with torch.no_grad():
        batch_size = target.size(0)
        maxk = max(topk)

        # Top-maxk class indices per sample, transposed to (maxk, batch).
        top_idx = output.topk(maxk, 1, True, True)[1].t()
        # Boolean grid: row r marks samples whose rank-r prediction is correct.
        correct = top_idx.eq(target.view(1, -1).expand_as(top_idx))

        results = []
        for k in topk:
            hits = correct[:k].reshape(-1).float().sum(0, keepdim=True)
            results.append(hits.mul_(100.0 / batch_size))
        return results


# Script entry point: parse CLI arguments and start training when run directly.
if __name__ == '__main__':
    main()
