import os
import sys
import random
import time
import warnings
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim
import torch.utils.data
import torch.utils.data.distributed
import torchvision.transforms as transforms
import torchvision.datasets as datasets
import numpy as np
from net.fbnet.fbnet import *
from net.fbnet.train.fbdatasets import FBDataset
from net.fbnet.train.options import opts

from tensorboardX import SummaryWriter
# Make the project root and its parent importable regardless of the
# directory the script is launched from.
curPath = os.path.abspath(os.path.dirname(__file__))
rootPath = os.path.split(curPath)[0]
PathProject = os.path.split(rootPath)[0]
sys.path.append(rootPath)
sys.path.append(PathProject)

best_acc1 = 0  # best validation accuracy so far; may be overwritten on resume
writer = SummaryWriter()        # TensorBoard writer, used for visualization

def main():
    """Entry point: optionally seed the RNGs, then launch one training worker."""
    seed = opts['seed']
    if seed is not None:
        # Deterministic mode: fix Python and torch RNGs and pin cuDNN kernels.
        random.seed(seed)
        torch.manual_seed(seed)
        cudnn.deterministic = True
        warnings.warn('You have chosen to seed training. '
                      'This will turn on the CUDNN deterministic setting, '
                      'which can slow down your training considerably! '
                      'You may see unexpected behavior when restarting '
                      'from checkpoints.')

    if opts['gpu'] is not None:
        warnings.warn('You have chosen a specific GPU. This will completely '
                      'disable data parallelism.')

    # Single-process launch; no distributed spawn is used here.
    main_worker(opts['gpu'], torch.cuda.device_count(), opts)


def main_worker(gpu, ngpus_per_node, opts):
    """Build model, optimizer and data pipeline, then run the training loop.

    Args:
        gpu: specific GPU index to train on, or None to wrap the model in
            DataParallel across all visible GPUs.
        ngpus_per_node: number of GPUs on this node (currently unused; kept
            for signature compatibility with distributed launchers).
        opts: dict of hyper-parameters and paths (see net.fbnet.train.options).
    """
    global best_acc1
    if gpu is not None:
        print("Use GPU: {} for training".format(opts['gpu']))

    # create model
    # model = ADEVGG16(tr=True)
    model = FBNet(is_train=True)

    if gpu is not None:
        torch.cuda.set_device(gpu)
        model = model.cuda(gpu)
    else:
        # No specific GPU requested: replicate across every visible GPU.
        model = torch.nn.DataParallel(model).cuda()

    # define loss function (criterion) and optimizer
    criterion = nn.MSELoss().cuda(gpu)
    optimizer = torch.optim.SGD(model.parameters(), opts['lr'],
                                momentum=opts['momentum'],
                                weight_decay=opts['weight-decay'])

    # optionally resume from a checkpoint
    if opts['resume']:
        if os.path.isfile(opts['resume']):
            print("=> loading checkpoint '{}'".format(opts['resume']))
            checkpoint = torch.load(opts['resume'])
            # BUGFIX: the training loop below reads opts['start-epoch']
            # (hyphenated key); previously the resumed epoch was written to
            # the dead key 'start_epoch' and silently ignored.
            opts['start-epoch'] = checkpoint['epoch']
            # BUGFIX: save_checkpoint() does not store 'best_acc1', so plain
            # indexing raised KeyError on every resume; fall back gracefully.
            best_acc1 = checkpoint.get('best_acc1', best_acc1)
            if gpu is not None and torch.is_tensor(best_acc1):
                # best_acc1 may be from a checkpoint from a different GPU
                best_acc1 = best_acc1.to(gpu)
            model.load_state_dict(checkpoint['state_dict'])

            optimizer.load_state_dict(checkpoint['optimizer'])

            print("=> loaded checkpoint '{}' (epoch {})"
                  .format(opts['resume'], checkpoint['epoch']))
        else:
            print("=> no checkpoint found at '{}'".format(opts['resume']))
    cudnn.benchmark = True

    # Data loading code
    normalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],
                                     std=[0.229, 0.224, 0.225])

    anno_file = './anno_images.txt'
    #root_dir = 'E:/workspace/DCLT/net/fbnet/train'
    root_dir = '/home/xx/workspace/DCLT/net/fbnet/train/'
    train_dataset = FBDataset(anno_file=anno_file, root_dir=root_dir, im_sz=opts['img_size'],
                              transform=transforms.Compose([
                                  # Fully-convolutional net: any input size works,
                                  # but resize to a fixed square for batching.
                                  transforms.Resize((opts['img_size'], opts['img_size'])),
                                  transforms.RandomHorizontalFlip(),
                                  transforms.ToTensor(),
                                  normalize,
                              ]))

    train_loader = torch.utils.data.DataLoader(
        train_dataset, batch_size=opts['batch_size'], shuffle=True,
        num_workers=opts['workers'], pin_memory=True, sampler=None)

    for epoch in range(opts['start-epoch'], opts['epochs']):
        adjust_learning_rate(optimizer, epoch, opts)

        # train for one epoch
        train(train_loader, model, criterion, optimizer, epoch, opts)

        # Persist a checkpoint after every epoch (overwrites the previous one).
        save_checkpoint({
            'epoch': epoch + 1,
            'arch': opts['arch'],
            'state_dict': model.state_dict(),
            'optimizer': optimizer.state_dict(),
        })


def train(train_loader, model, criterion, optimizer, epoch, opts):
    """Run one training epoch over *train_loader*.

    Args:
        train_loader: DataLoader yielding dicts with 'image' and 'label' tensors.
        model: network producing a prediction map for each image batch.
        criterion: kept for signature compatibility only — the loss actually
            optimized is fb_loss (Gaussian soft-label scoring), not this one.
        optimizer: SGD optimizer stepped once per batch.
        epoch: current epoch index (used for the log x-axis).
        opts: hyper-parameter dict ('gpu', 'print-freq', ...).
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()

    # switch to train mode
    model.train()

    end = time.time()
    for i, sample_batched in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # Renamed from `input` to avoid shadowing the builtin.
        images = sample_batched['image']
        target = sample_batched['label']
        if opts['gpu'] is not None:
            images = images.cuda(opts['gpu'], non_blocking=True)
            target = target.cuda(opts['gpu'], non_blocking=True)
        else:
            images = images.cuda()
            target = target.cuda()

        # compute output
        pred = model(images)

        # Gaussian soft-label loss; intentionally used instead of `criterion`.
        loss = fb_loss(target, pred)

        losses.update(loss.item(), images.size(0))

        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()

        if i % opts['print-freq'] == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})'.format(
                   epoch, i, len(train_loader), batch_time=batch_time,
                   data_time=data_time, loss=losses))
            # Log the running loss plus one target/output image pair.
            # NOTE(review): the x-axis is `epoch`, so several prints within
            # one epoch overwrite the same point — confirm this is intended.
            writer.add_scalar('data/Loss', losses.avg, epoch)
            # BUGFIX: sample index 0 instead of 1, so a trailing batch of
            # size 1 no longer raises IndexError.
            writer.add_image('net/Target Image', target[0, :, :, :], epoch)
            writer.add_image('net/Output Image', pred[0, :, :, :], epoch)

def validate(val_loader, model, criterion, opts):
    """Evaluate *model* on *val_loader* with a reconstruction loss.

    The model is scored against its own input (auto-encoder style); the
    soft label yielded by the loader is currently unused.

    Returns:
        0 — accuracy tracking is not implemented yet (placeholder for top1.avg).
    """
    batch_time = AverageMeter()
    losses = AverageMeter()

    # switch to evaluate mode
    model.eval()

    with torch.no_grad():
        end = time.time()
        for i, (images, soft_label) in enumerate(val_loader):
            if opts['gpu'] is not None:
                images = images.cuda(opts['gpu'], non_blocking=True)
            else:
                images = images.cuda()
            # Reconstruction target is the input itself.
            target = images

            # compute output
            output = model(images)

            loss = criterion(output, target)

            losses.update(loss.item(), images.size(0))

            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()

            # BUGFIX: opts is a dict, so the previous attribute access
            # `opts.print_freq` raised AttributeError whenever it printed;
            # use the same 'print-freq' key as train().
            if i % opts['print-freq'] == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'.format(
                       i, len(val_loader), batch_time=batch_time, loss=losses,
                       ))
    return 0        # top1.avg


def save_checkpoint(state, filename='checkpoint.pth.tar'):
    """Serialize the training *state* dict to *filename* via torch.save.

    Note: each call overwrites the previous checkpoint; no 'best model'
    copy is kept.
    """
    torch.save(state, filename)


class AverageMeter(object):
    """Tracks the most recent value and the running mean of a scalar metric."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Clear every statistic back to zero."""
        self.val = 0
        self.sum = 0
        self.count = 0
        self.avg = 0

    def update(self, val, n=1):
        """Record *val* observed *n* times and refresh the running mean."""
        self.val = val
        self.count += n
        self.sum += n * val
        self.avg = self.sum / self.count


def adjust_learning_rate(optimizer, epoch, opts):
    """Step-decay schedule: multiply the base LR by 0.1 once every 30 epochs."""
    decayed = opts['lr'] * (0.1 ** (epoch // 30))
    for group in optimizer.param_groups:
        group['lr'] = decayed



if __name__ == '__main__':
    # Run training, then flush all logged scalars to JSON and release the writer.
    main()
    writer.export_scalars_to_json("./all_scalars.json")
    writer.close()