# -*- coding: utf-8 -*-

import os
from collections import OrderedDict

import pandas as pd
import torch.backends.cudnn as cudnn
import torch.nn as nn
import torch.optim as optim
from torch.optim import lr_scheduler
from tqdm import tqdm
from utils.util import *
from utils.selectDataset import selectDataset
from network.MSNet import MSNet50


def parse_args():
    """Parse command-line hyper-parameters and paths for MSNet training/testing.

    Returns:
        argparse.Namespace holding every option below.
    """
    parser = argparse.ArgumentParser()

    parser.add_argument('--name', default=None,
                        help='model name: (default: arch+timestamp)')
    parser.add_argument('--dataset', default='nwpu',
                        choices=['ucm', 'nwpu', 'aid', 'rs19', "optimal"],
                        help='dataset name')
    # type=int so command-line values are parsed as integers; previously only
    # the defaults were ints and CLI-supplied values arrived as strings
    # (e.g. "--is_training 0" was the truthy string "0").
    parser.add_argument('--num_classes', default=7, type=int)
    parser.add_argument('--dataset_dir', default='data/nwpu_0.1', help='path to ImageNet directory')
    parser.add_argument('--arch', default='(raf-db)ResNet_MS(50)')
    parser.add_argument('--epochs', default=80, type=int)
    parser.add_argument('--lr', '--learning-rate', default=1e-3, type=float)
    parser.add_argument('--milestones', default='40,60', type=str)
    parser.add_argument('--gamma', default=0.1, type=float)
    parser.add_argument('--momentum', default=0.9, type=float)
    parser.add_argument('--weight-decay', default=5e-4, type=float)  # 5e-4
    parser.add_argument('--nesterov', default=False, type=str2bool)
    parser.add_argument('--theta', default=50, type=int)
    parser.add_argument('--alpha', default=15, type=int)
    parser.add_argument('--beta', default=10, type=int)
    parser.add_argument('--MS', default=1, type=int)
    parser.add_argument('--is_training', default=1, type=int)  # train or test (1,0)
    parser.add_argument('--gpu_id', default=0, type=int)
    parser.add_argument('--weight_dir', default='')
    args = parser.parse_args()

    return args


def train(args, train_loader, model, criterion, optimizer, epoch, scheduler=None):
    """Run one training epoch and return averaged metrics.

    Args:
        args: parsed options; ``args.beta`` and ``args.alpha`` weight the
            auxiliary FCB/FDB losses returned by the model.
        train_loader: iterable of (input, target) batches.
        model: network returning (logits, FCB_loss, FDB_loss) in train mode.
        criterion: classification loss (cross-entropy on logits).
        optimizer: optimizer stepped once per batch.
        epoch: current epoch index (unused here; kept for interface parity).
        scheduler: unused; stepping happens in the caller.

    Returns:
        OrderedDict with epoch-averaged 'loss', 'acc1', 'acc5'.
    """
    losses = AverageMeter()
    acc1s = AverageMeter()
    acc5s = AverageMeter()

    model.train()

    for i, (input, target) in tqdm(enumerate(train_loader), total=len(train_loader)):
        input = input.cuda()
        target = target.cuda()
        output, FCB_loss, FDB_loss = model(input, target)
        CE_loss = criterion(output, target)
        # Weight the auxiliary losses with the CLI hyper-parameters instead of
        # the previously hard-coded 10 / 15, which silently ignored
        # --beta (default 10) and --alpha (default 15).
        loss = CE_loss + args.beta * FCB_loss + args.alpha * FDB_loss
        acc1, acc5 = accuracy(output, target, topk=(1, 5))

        losses.update(loss.item(), input.size(0))
        acc1s.update(acc1.item(), input.size(0))
        acc5s.update(acc5.item(), input.size(0))

        # compute gradient and do optimizing step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

    log = OrderedDict([
        ('loss', losses.avg),
        ('acc1', acc1s.avg),
        ('acc5', acc5s.avg),
    ])

    return log


def validate(args, val_loader, model, criterion):
    """Evaluate the model on the validation set.

    Returns:
        OrderedDict with set-averaged 'loss', 'acc1', 'acc5'.
    """
    loss_meter = AverageMeter()
    top1_meter = AverageMeter()
    top5_meter = AverageMeter()

    model.eval()  # switch to evaluate mode

    with torch.no_grad():
        for _, (images, labels) in tqdm(enumerate(val_loader), total=len(val_loader)):
            images = images.cuda()
            labels = labels.cuda()
            predictions = model(images, labels)
            batch_loss = criterion(predictions, labels)
            top1, top5 = accuracy(predictions, labels, topk=(1, 5))
            batch_size = images.size(0)
            loss_meter.update(batch_loss.item(), batch_size)
            top1_meter.update(top1.item(), batch_size)
            top5_meter.update(top5.item(), batch_size)

    return OrderedDict([
        ('loss', loss_meter.avg),
        ('acc1', top1_meter.avg),
        ('acc5', top5_meter.avg),
    ])


def main():
    """Entry point: seed RNGs, build model/data/optimizer, then train or test."""
    args = parse_args()

    os.environ["CUDA_VISIBLE_DEVICES"] = str(args.gpu_id)
    # Fix all RNG seeds for reproducibility.
    seed = 2000
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.backends.cudnn.deterministic = True
    np.random.seed(seed)
    random.seed(seed)

    if args.name is None:
        args.name = '%s' % args.arch

    if not os.path.exists('weights/%s' % args.name):
        os.makedirs('weights/%s' % args.name)

    # Echo the configuration to stdout and persist it next to the checkpoints.
    print('Config -----')
    for arg in vars(args):
        print('%s: %s' % (arg, getattr(args, arg)))
    print('------------')

    with open('weights/%s/args.txt' % args.name, 'w') as f:
        for arg in vars(args):
            print('%s: %s' % (arg, getattr(args, arg)), file=f)

    criterion = nn.CrossEntropyLoss().cuda()
    cudnn.benchmark = True

    train_loader, test_loader = selectDataset(args.dataset, args.dataset_dir)

    # Both modes build the same network; evaluation additionally restores
    # trained weights from --weight_dir.
    model = MSNet50(args.num_classes, args.theta, pretrained=True)
    if not args.is_training:
        net = torch.load(args.weight_dir)
        model.load_state_dict(net)

    print(torch.cuda.is_available())
    model = model.cuda()

    optimizer = optim.SGD(filter(lambda p: p.requires_grad, model.parameters()), lr=args.lr, momentum=args.momentum,
                          weight_decay=args.weight_decay)

    scheduler = lr_scheduler.MultiStepLR(optimizer,
                                         milestones=[int(e) for e in args.milestones.split(',')], gamma=args.gamma)

    log = pd.DataFrame(index=[], columns=[
        'epoch', 'lr', 'loss', 'acc1', 'acc5', 'val_loss', 'val_acc1', 'val_acc5'
    ])

    best_acc = 0
    if args.is_training:
        print("Training")
        for epoch in range(args.epochs):
            print('Epoch [%d/%d]' % (epoch+1, args.epochs))

            # train for one epoch
            train_log = train(args, train_loader, model, criterion, optimizer, epoch)
            scheduler.step()
            print('loss %.4f - acc1 %.4f - acc5 %.4f '
                  % (train_log['loss'], train_log['acc1'], train_log['acc5']))
            # Evaluate (and checkpoint the best model) every 5 epochs.
            if (epoch+1) % 5 == 0:
                val_log = validate(args, test_loader, model, criterion)

                print('val_loss %.4f - val_acc %.4f - val_acc5 %.4f'
                      % (val_log['loss'], val_log['acc1'], val_log['acc5']))

                tmp = pd.Series([
                    epoch,
                    # get_last_lr() replaces the deprecated get_lr(), which
                    # returns a wrong (warmup-adjusted) value when called
                    # outside scheduler.step().
                    scheduler.get_last_lr()[0],
                    train_log['loss'],
                    train_log['acc1'],
                    train_log['acc5'],
                    val_log['loss'],
                    val_log['acc1'],
                    val_log['acc5'],
                ], index=['epoch', 'lr', 'loss', 'acc1', 'acc5', 'val_loss', 'val_acc1', 'val_acc5'])

                # DataFrame.append was removed in pandas 2.x; concat a
                # one-row frame instead.
                log = pd.concat([log, tmp.to_frame().T], ignore_index=True)
                log.to_csv('weights/%s/log.csv' % args.name, index=False)

                if val_log['acc1'] > best_acc:
                    torch.save(model.state_dict(), 'weights/%s/model.pth' % args.name)
                    best_acc = val_log['acc1']
                    print("=> saved best model")

    else:
        val_log = validate(args, test_loader, model, criterion)
        print('val_loss %.4f - val_acc %.4f - val_acc5 %.4f' % (val_log['loss'], val_log['acc1'], val_log['acc5']))


if __name__ == '__main__':
    # Emit a leading blank line before the config dump that main() prints.
    print()

    main()
