import os
import sys
import random
import shutil
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets

from net.ade.ade32 import Encoder, Decoder
from net.ade.train.options import opts
from tensorboardX import SummaryWriter
# Resolve this file's directory and the two ancestor directories, then put
# them on sys.path so sibling packages can be imported when run as a script.
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
PathProject = os.path.split(rootPath)[0]
# NOTE(review): these appends run *after* the `net.ade` imports above, so they
# cannot help resolve those imports — confirm they are still needed here.
sys.path.append(rootPath)
sys.path.append(PathProject)

# Best validation accuracy seen so far (currently only loaded from checkpoints;
# the accuracy computation itself is commented out in the epoch loop).
best_acc1 = 0
writer = SummaryWriter()        # TensorBoard writer for visualization

def main():
    """Entry point: optionally seed RNGs, warn about GPU pinning, and launch
    the (single-node, non-distributed) training worker."""
    seed = opts['seed']
    if seed is not None:
        # Deterministic cuDNN trades speed for reproducibility.
        random.seed(seed)
        torch.manual_seed(seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if opts['gpu'] is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # Simply call main_worker function
    main_worker(opts['gpu'], torch.cuda.device_count(), opts)


def main_worker(gpu, ngpus_per_node, opts):
    """Build the encoder/decoder autoencoder, optionally resume from a
    checkpoint, and run the training loop.

    Args:
        gpu: explicit CUDA device index, or None to wrap both networks in
            DataParallel over all visible GPUs.
        ngpus_per_node: number of visible CUDA devices (unused here; kept for
            signature parity with the usual distributed launcher).
        opts: configuration dict (keys follow net.ade.train.options).
    """
    global best_acc1
    if gpu is not None:
        print("Use GPU: {} for training".format(opts['gpu']))

    # create model: a separately-optimized encoder/decoder pair
    encoder = Encoder(tr=True)
    decoder = Decoder()

    if gpu is not None:
        torch.cuda.set_device(gpu)
        encoder = encoder.cuda(gpu)
        decoder = decoder.cuda(gpu)
    else:
        encoder = torch.nn.DataParallel(encoder).cuda()
        decoder = torch.nn.DataParallel(decoder).cuda()

    # define loss function (criterion) and one SGD optimizer per sub-network
    criterion = nn.MSELoss().cuda(gpu)
    encoder_optimizer = torch.optim.SGD(encoder.parameters(), opts['lr'],
                                        momentum=opts['momentum'],
                                        weight_decay=opts['weight-decay'])
    decoder_optimizer = torch.optim.SGD(decoder.parameters(), opts['lr'],
                                        momentum=opts['momentum'],
                                        weight_decay=opts['weight-decay'])

    # optionally resume from a checkpoint
    if opts['resume']:
        if os.path.isfile(opts['resume']):
            print("=> loading checkpoint '{}'".format(opts['resume']))
            checkpoint = torch.load(opts['resume'])
            # BUG FIX: the epoch loop below reads opts['start-epoch'], but the
            # original wrote opts['start_epoch'], so resuming always restarted
            # from epoch 0.
            opts['start-epoch'] = checkpoint['epoch']
            # BUG FIX: checkpoints written by save_checkpoint() in this file
            # never store 'best_acc1'; default instead of raising KeyError.
            best_acc1 = checkpoint.get('best_acc1', best_acc1)
            if gpu is not None and torch.is_tensor(best_acc1):
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(gpu)
            encoder.load_state_dict(checkpoint['encoder_state_dict'])
            decoder.load_state_dict(checkpoint['decoder_state_dict'])

            encoder_optimizer.load_state_dict(checkpoint['encoder_optimizer'])
            decoder_optimizer.load_state_dict(checkpoint['decoder_optimizer'])

            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(opts['resume'], checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(opts['resume']))
    cudnn.benchmark = True

    # Data loading code
    traindir = os.path.join(opts['data'], 'train')
    valdir = os.path.join(opts['data'], 'val')
    # ImageNet channel statistics
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    train_dataset = datasets.ImageFolder(
        traindir,
        transforms.Compose([
            transforms.RandomResizedCrop(opts['img_size']),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            normalize,
        ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=opts['batch_size'], shuffle=True,
        num_workers=opts['workers'], pin_memory=True, sampler=None)

    val_loader = torch.utils.data.DataLoader(
        datasets.ImageFolder(valdir, transforms.Compose([
            transforms.Resize(32 + opts['img_size']),
            transforms.CenterCrop(opts['img_size']),
            transforms.ToTensor(),
            normalize,
        ])),
        batch_size=opts['batch_size'], shuffle=False,
        num_workers=opts['workers'], pin_memory=True)

    if opts['evaluate']:
        validate(val_loader, encoder, decoder, criterion, opts)
        return

    for epoch in range(opts['start-epoch'], opts['epochs']):
        adjust_learning_rate(encoder_optimizer, decoder_optimizer, epoch, opts)

        # train for one epoch
        train(train_loader, encoder, decoder, criterion, encoder_optimizer, decoder_optimizer, epoch, opts)

        # evaluate on validation set
        # acc1 = validate(val_loader, ade, decoder, criterion, opts)

        # remember best acc@1 and save checkpoint
        # is_best = acc1 > best_acc1
        # best_acc1 = max(acc1, best_acc1)

        # save a training checkpoint after every epoch
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': opts['arch'],
            'encoder_state_dict': encoder.state_dict(),
            'decoder_state_dict': decoder.state_dict(),
            'encoder_optimizer' : encoder_optimizer.state_dict(),
            'decoder_optimizer': decoder_optimizer.state_dict(),
        })


def train(train_loader, encoder, decoder, criterion, encoder_optimizer, decoder_optimizer, epoch, opts):
    """Train the autoencoder for one epoch.

    Each batch is fed to the encoder, reconstructed by the decoder, and the
    reconstruction (MSE) loss is back-propagated through both optimizers.
    Progress is printed and logged to TensorBoard every opts['print-freq']
    iterations.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    encoder.train()
    decoder.train()

    end = time.time()
    # `images` instead of `input` to avoid shadowing the builtin
    for i, (images, _) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)

        if opts['gpu'] is not None:
            images = images.cuda(opts['gpu'], non_blocking=True)
        else:
            images = images.cuda()
        # autoencoder: the reconstruction target is the input itself
        target = images.cuda()      # target.cuda(opts.gpu, non_blocking=True)

        # compute output
        encoded, pool_indices = encoder(images)
        decoded = decoder(encoded, pool_indices)        # decode the output
        loss = criterion(decoded, target)

        losses.update(loss.item(), images.size(0))

        # compute gradient and do SGD step
        encoder_optimizer.zero_grad()
        decoder_optimizer.zero_grad()

        loss.backward()
        encoder_optimizer.step()
        decoder_optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % opts['print-freq'] == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses))
            # BUG FIX: log against a monotonically-increasing global step;
            # the original used `epoch`, so every print-freq hit within one
            # epoch overwrote the same TensorBoard x value.
            global_step = epoch * len(train_loader) + i
            writer.add_scalar('data/Loss', losses.avg, global_step)
            writer.add_image('net/Target Image', target.cpu().data.numpy()[0], global_step)
            writer.add_image('net/Output Image', decoded.cpu().data.numpy()[0], global_step)

def validate(val_loader, encoder, decoder, criterion, opts):
    """Evaluate reconstruction loss on the validation set.

    Runs the autoencoder under torch.no_grad() and prints timing/loss every
    opts['print-freq'] batches.

    Returns:
        0 — placeholder; no accuracy metric is defined for the autoencoder.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    encoder.eval()
    decoder.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, _) in enumerate(val_loader):
            # BUG FIX: opts is a dict — the original used attribute access
            # (opts.gpu), which raises AttributeError.
            if opts['gpu'] is not None:
                images = images.cuda(opts['gpu'], non_blocking=True)
            else:
                images = images.cuda()
            # autoencoder: the reconstruction target is the input itself
            target = images

            # compute output
            # BUG FIX: match train() — the encoder returns
            # (encoded, pool_indices), not a 4-tuple.
            encoded, pool_indices = encoder(images)
            decoded = decoder(encoded, pool_indices)  # decode the output
            loss = criterion(decoded, target)

            # measure accuracy and record loss
            losses.update(loss.item(), images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # BUG FIX: dict access with the same hyphenated key train() uses
            # (was opts.print_freq — AttributeError on a dict).
            if i % opts['print-freq'] == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       ))
    return 0        #top1.avg


def save_checkpoint(state, is_best=False, filename='checkpoint.pth.tar'):
    """Serialize a training-state dict to *filename*.

    Args:
        state: dict of epoch, model/optimizer state_dicts, etc.
        is_best: when True, also copy the checkpoint to 'model_best.pth.tar'
            (restores the previously commented-out behavior; defaults to
            False so existing single-argument callers are unchanged).
        filename: destination path for the checkpoint.
    """
    torch.save(state, filename)
    if is_best:
        shutil.copyfile(filename, 'model_best.pth.tar')


class AverageMeter(object):
    """Running statistics: the latest value, the sum, the sample count,
    and the sample-weighted mean of everything seen since the last reset."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all tracked statistics."""
        self.val = self.avg = self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Fold in *val*, treated as an average over *n* samples."""
        self.val = val
        self.count += n
        self.sum += val * n
        self.avg = self.sum / self.count


def adjust_learning_rate(encoder_optimizer, decoder_optimizer, epoch, opts):
    """Apply a step decay: the base LR (opts['lr']) divided by 10 for every
    30 completed epochs, written into both optimizers' param groups."""
    decayed = opts['lr'] * (0.1 ** (epoch // 30))
    for optimizer in (encoder_optimizer, decoder_optimizer):
        for group in optimizer.param_groups:
            group['lr'] = decayed



if __name__ == '__main__':
    main()
    # Dump all logged scalars to JSON and flush/close the TensorBoard writer.
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()