import os
import random
import time
import warnings

import torch
import torch.backends.cudnn as cudnn
from tensorboardX import SummaryWriter
from torch import optim, nn, hub

from config import Config
from training import models
from training.datasets.oulu_npu_dataset import get_oulu_npu_dataloader
from training.losses import TripletLoss
from training.models.compactnet import compactnet
from training.tools.metrics import AverageMeter, ProgressMeter
from training.tools.train_utils import parse_args, validate

# Let cuDNN benchmark conv algorithms; fastest for fixed input sizes.
cudnn.benchmark = True

CONFIG = Config()
# Redirect torch.hub's cache (pretrained weight downloads) to the configured dir.
hub.set_dir(CONFIG['TORCH_HOME'])

# Monotonic step counter shared across epochs, used for TensorBoard x-axis.
global_step = 1


def get_inf_iterator(data_loader):
    """Yield batches from *data_loader* forever, restarting it when exhausted.

    The loader is re-iterated (not cached), so a DataLoader with shuffling
    produces a fresh ordering on every pass.
    """
    while True:
        yield from data_loader


def train_v2(train_real_iter, train_fake_iter, iter_num, model, optimizer, criterion, epoch, writer, args):
    """Run one training epoch over mixed real/fake batches.

    Each step draws one batch from the real-sample iterator and one from the
    fake-sample iterator, concatenates them, shuffles the combined batch, and
    performs a single optimizer step with *criterion*.

    Args:
        train_real_iter: infinite iterator over real-sample batches
            (dicts with 'image' and 'label' tensors — see get_inf_iterator).
        train_fake_iter: infinite iterator over fake-sample batches.
        iter_num: number of optimization steps in this epoch.
        model: network being trained (already moved to GPU by the caller).
        optimizer: optimizer over ``model``'s parameters.
        criterion: loss callable taking ``(outputs, labels)``.
        epoch: 1-based epoch index, used only for progress display.
        writer: tensorboardX SummaryWriter receiving scalar/image/embedding logs.
        args: parsed CLI args; only ``print_freq`` is read here.
    """
    global global_step
    batch_time = AverageMeter('Time', ':6.3f')
    losses = AverageMeter('Loss', ':.4e')
    progress = ProgressMeter(iter_num, [batch_time, losses], prefix="Epoch: [{}]".format(epoch))

    model.train()

    end = time.time()
    for batch_idx in range(iter_num):
        sample_real = next(train_real_iter)
        sample_fake = next(train_fake_iter)
        images = torch.cat([sample_real['image'], sample_fake['image']], dim=0).cuda()
        labels = torch.cat([sample_real['label'], sample_fake['label']], dim=0).cuda()

        # Interleave real and fake samples within the batch. torch.randperm
        # draws from the torch RNG (the one seeded by torch.manual_seed) and
        # permutes on-device, instead of shuffling a Python index list.
        perm = torch.randperm(images.size(0), device=images.device)
        images = images[perm]
        labels = labels[perm]

        # compute output
        outputs = model(images)
        loss = criterion(outputs, labels)

        # record loss
        losses.update(loss.item(), images.size(0))

        # compute gradient and do Adam step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        batch_time.update(time.time() - end)
        end = time.time()
        global_step += 1
        if (batch_idx + 1) % args.print_freq == 0:
            progress.display(batch_idx + 1)
            writer.add_scalar('Loss/train', loss.item(), global_step)
            writer.add_scalar('Learning rate', optimizer.param_groups[0]['lr'], global_step)
            writer.add_images('Images', images, global_step)
            # Detach so the logged embeddings do not keep the autograd graph alive.
            writer.add_embedding(outputs.detach(), metadata=labels, label_img=images,
                                 global_step=global_step)


def main():
    """Entry point: parse CLI args, build model/data/optimizer, train, checkpoint.

    Side effects: writes TensorBoard logs under ``runs/<timestamp>`` and model
    checkpoints under ``weights/``.
    """
    args = parse_args()
    print(args)
    # NOTE(review): ':' is invalid in Windows file names — confirm training
    # runs POSIX-only before keeping this timestamp format.
    time_stp = time.strftime("%Y-%m-%d_%H:%M:%S", time.localtime())
    writer = SummaryWriter(log_dir=os.path.join('runs', time_stp))

    if args.seed is not None:
        random.seed(args.seed)
        torch.manual_seed(args.seed)
        cudnn.deterministic = True
        # Missing space between sentences fixed in the warning text below.
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting from checkpoints.')

    model = compactnet(feature_extractor=models.__dict__[args.arch], pretrained=True)
    # model = discriminativecolorspace(feature_extractor=models.__dict__[args.arch], pretrained=True)

    train_real_loader, train_fake_loader, val_loader = get_oulu_npu_dataloader(model, args, split_real_fake=True)

    if args.gpu is not None:
        torch.cuda.set_device(args.gpu)
        model = model.cuda(args.gpu)
    else:
        model = torch.nn.DataParallel(model).cuda()

    optimizer = optim.Adam(model.parameters(), lr=args.lr)
    # optimizer = torch.optim.SGD(model.parameters(), lr=args.lr, weight_decay=args.weight_decay, momentum=args.momentum)
    # scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'max', patience=2)
    criterion = TripletLoss().cuda()

    # Ensure the checkpoint directory exists; torch.save does not create it.
    os.makedirs('weights', exist_ok=True)

    # One "epoch" covers the longer of the two loaders; the shorter one wraps
    # around via its infinite iterator.
    iter_num = max(len(train_real_loader), len(train_fake_loader))
    start_epoch = 1
    best_prec1 = 0.
    for epoch in range(start_epoch, args.epochs + 1):
        # Fresh iterators each epoch so both loaders restart (and reshuffle).
        train_real_iter = get_inf_iterator(train_real_loader)
        train_fake_iter = get_inf_iterator(train_fake_loader)
        train_v2(train_real_iter, train_fake_iter, iter_num, model, optimizer, criterion, epoch, writer, args)
        # Validate every other epoch and always on the final one.
        if epoch % 2 == 0 or epoch == args.epochs:
            acc1 = validate(val_loader, model, criterion, epoch, args)
            best_prec1 = max(acc1, best_prec1)
        save_model = ((epoch == args.epochs) or (epoch % 5 == 0))
        if save_model:
            print("Save model...")
            torch.save({
                'epoch': epoch,
                'arch': args.arch,
                'state_dict': model.state_dict(),
                'best_prec1': best_prec1,
                'optimizer': optimizer.state_dict(),
            }, os.path.join('weights', '{}_{}_{}.pt'.format(args.arch, args.prefix, epoch)))
    writer.close()


# Script entry point: run training only when executed directly, not on import.
if __name__ == '__main__':
    main()
