import argparse
import time

import matplotlib.pyplot as plt
import numpy as np
import torch

from training import models
from training.models.attention import AttentionFusion
from training.tools.metrics import AverageMeter, ProgressMeter, accuracy

# Discover every lowercase, callable model factory exported by training.models.
model_names = sorted(
    attr for attr, obj in models.__dict__.items()
    if attr.islower() and not attr.startswith("__") and callable(obj)
)


def parse_args():
    """Build and parse the command-line arguments for training/evaluation."""
    ap = argparse.ArgumentParser()

    # Data, model and basic training hyper-parameters.
    ap.add_argument('--data-dir', metavar='DIR', help='path to dataset')
    ap.add_argument('--arch', metavar='ARCH', default='xception', choices=model_names,
                    help='model architecture: ' + ' | '.join(model_names) + ' (default: xception)')
    ap.add_argument('-j', '--workers', default=4, type=int, metavar='N',
                    help='number of data loading workers (default: 4)')
    ap.add_argument('--prefix', type=str, default='oulu_npu', help='(default: oulu_npu)')
    ap.add_argument('--epochs', type=int, default=10, metavar='N',
                    help='number of epochs to train (default: 10)')
    ap.add_argument('--batch-size', type=int, default=100, metavar='N', help='batch size')
    ap.add_argument('--lr', '--learning-rate', default=1e-4, type=float,
                    metavar='LR', help='initial learning rate', dest='lr')
    ap.add_argument('--momentum', default=0.9, type=float, metavar='M',
                    help='momentum')
    ap.add_argument('--wd', '--weight-decay', default=1e-4, type=float,
                    metavar='W', help='weight decay (default: 1e-4)',
                    dest='weight_decay')
    ap.add_argument('--print-freq', type=int, default=10, metavar='N',
                    help='how many batches to wait before logging training status')
    ap.add_argument('--evaluate', dest='evaluate', action='store_true',
                    help='evaluate model on validation set')
    ap.add_argument('--resume', default='', type=str, metavar='PATH',
                    help='path to latest checkpoint (default: none)')

    # Distributed-training options.
    ap.add_argument('--world-size', default=-1, type=int,
                    help='number of nodes for distributed training')
    ap.add_argument('--rank', default=-1, type=int,
                    help='node rank for distributed training')
    ap.add_argument('--dist-url', default='tcp://224.66.41.62:23456', type=str,
                    help='url used to set up distributed training')
    ap.add_argument('--dist-backend', default='nccl', type=str,
                    help='distributed backend')
    ap.add_argument('--seed', default=None, type=int,
                    help='seed for initializing training. ')
    ap.add_argument('--gpu', default=None, type=int,
                    help='GPU id to use.')
    ap.add_argument('--multiprocessing-distributed', action='store_true',
                    help='Use multi-processing distributed training to launch '
                         'N processes per node, which has N GPUs. This is the '
                         'fastest way to use PyTorch for either single node or '
                         'multi node data parallel training')

    return ap.parse_args()


def train(train_loader, model, optimizer, criterion, epoch, args):
    """Run one training epoch over `train_loader`.

    Args:
        train_loader: iterable yielding dict samples with 'image', 'msr' and
            'label' tensors.
        model: network to train; AttentionFusion models take (rgb, msr),
            all others take only the rgb stream.
        optimizer: optimizer stepped once per batch.
        criterion: loss function mapping (output, labels) -> scalar loss.
        epoch: current epoch index (used only in the progress prefix).
        args: parsed CLI args; only args.print_freq is read here.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    top1 = AverageMeter('Acc@1', ':6.2f')
    progress = ProgressMeter(len(train_loader), [batch_time, losses, top1], prefix="Epoch: [{}]".format(epoch))

    model.train()

    end = time.time()
    for batch_idx, sample in enumerate(train_loader):
        images = sample['image'].cuda()
        images_msr = sample['msr'].cuda()
        labels = sample['label'].cuda()

        # Attention-fusion models consume both the RGB and MSR streams.
        if isinstance(model, AttentionFusion):
            output = model(images, images_msr)
        else:
            output = model(images)
        loss = criterion(output, labels)

        # Measure accuracy and record loss.
        acc1, = accuracy(output.data, labels)
        top1.update(acc1[0], images.size(0))
        losses.update(loss.item(), images.size(0))

        # Compute gradient and do the optimizer step.
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # Track per-batch wall-clock time.
        # (Removed an unused read of optimizer.param_groups[0]['lr'] that was
        # assigned every batch and never used.)
        batch_time.update(time.time() - end)
        end = time.time()

        if (batch_idx + 1) % args.print_freq == 0:
            progress.display(batch_idx + 1)


def validate(val_loader, model, criterion, epoch, args):
    """Evaluate the model on the validation set and return top-1 accuracy.

    Args:
        val_loader: iterable yielding dict samples with 'image', 'msr' and
            'label' tensors.
        model: network to evaluate; AttentionFusion models take (rgb, msr),
            all others take only the rgb stream.
        criterion: unused here (loss is not tracked during validation);
            kept for signature compatibility with train().
        epoch: unused; kept for signature compatibility with train().
        args: parsed CLI args; only args.print_freq is read here.

    Returns:
        The average top-1 accuracy over the validation set.
    """
    batch_time = AverageMeter('Time', ':6.3f')
    top1 = AverageMeter('Acc@1', ':6.2f')
    progress = ProgressMeter(len(val_loader), [batch_time, top1], prefix='Validation: ')

    model.eval()

    end = time.time()
    # One no_grad context covers the whole loop (the original re-entered a
    # second, redundant no_grad per batch).
    with torch.no_grad():
        for batch_idx, sample in enumerate(val_loader):
            image = sample['image'].cuda()
            images_msr = sample['msr'].cuda()
            labels = sample['label'].cuda()

            # Compute output; attention-fusion models use both streams.
            if isinstance(model, AttentionFusion):
                output = model(image, images_msr)
            else:
                output = model(image)

            # Measure accuracy. The original also computed criterion(output,
            # labels) here, but the result was never recorded — dropped.
            acc1, = accuracy(output.data, labels)
            top1.update(acc1[0], image.size(0))

            # Measure elapsed time.
            batch_time.update(time.time() - end)
            end = time.time()
            if (batch_idx + 1) % args.print_freq == 0:
                progress.display(batch_idx + 1)

    return top1.avg


def imshow(data):
    """Display paired RGB / MSR tensors from a batch on a fixed 4x4 grid.

    Each sample occupies two adjacent cells: the un-normalized RGB image on
    the left, the first MSR channel (grayscale) on the right. The fixed 4x4
    grid fits at most 8 pairs.

    Args:
        data: dict with 'rgb' (CHW tensors normalized with ImageNet
            statistics) and 'msr' tensors.
    """
    rgb_tensors = data['rgb']
    msr_tensors = data['msr']
    # ImageNet normalization constants — hoisted out of the loop; they are
    # loop-invariant and were previously rebuilt for every sample.
    mean = np.array([0.485, 0.456, 0.406])
    std = np.array([0.229, 0.224, 0.225])
    fig = plt.figure(figsize=(8, 8))
    for index, (rgb_tensor, msr_tensor) in enumerate(zip(rgb_tensors, msr_tensors)):
        # Un-normalize the CHW RGB tensor and convert to an HWC image in [0, 1].
        inp = rgb_tensor.numpy().transpose((1, 2, 0))
        inp = np.clip(std * inp + mean, 0, 1)
        fig.add_subplot(4, 4, 2 * index + 1)
        plt.imshow(inp)
        # NOTE(review): the [:, :, 0] indexing assumes MSR tensors are
        # channel-last (HWC), unlike the CHW RGB tensors — confirm with the
        # data loader.
        fig.add_subplot(4, 4, 2 * index + 2)
        plt.imshow(msr_tensor[:, :, 0], cmap='gray')
    plt.show()
