#!/usr/bin/env python3
# train.py

""" train network using pytorch

author baiyu
"""

import os

import argparse
import time

import torch.optim as optim
from datetime import datetime
import timm
from attack import *
from robust import *

from conf import settings
from utils import get_training_dataloader, get_test_dataloader, WarmUpLR, \
    most_recent_folder, most_recent_weights, last_epoch, best_acc_weights
from batch_iter import BatchIter
import wandb

def train(net, epoch, args):
    """Run one epoch of adversarial fine-tuning.

    For every batch, the PGD generator attached to the network
    (``net.pgd``) is seeded with the batch and iterated; each yielded
    perturbed batch is concatenated with a fresh clean batch and used
    for a single optimization step.

    Relies on module-level globals: ``cifar100_training_loader``,
    ``warmup_scheduler``, ``optimizer``, ``loss_function``, ``wandb``.

    Args:
        net: model to train; must carry a ``pgd`` attribute.
        epoch: 1-based epoch index (used for warmup and logging).
        args: parsed CLI namespace (uses ``gpu``, ``warm``, ``b``).
    """
    start = time.time()
    net.train()
    # Deep-copy the loader so the adversarial iteration below does not
    # interfere with the clean-batch iterator drawn from the same loader.
    cifar100_training_loader_adv = deepcopy(cifar100_training_loader)
    clean_batch_iter = BatchIter(cifar100_training_loader, args.gpu)
    for batch_index, (images_adv, labels_adv) in enumerate(cifar100_training_loader_adv):
        # Warmup LR is stepped per-batch during the warm phase.
        if epoch <= args.warm:
            warmup_scheduler.step()

        if args.gpu:
            images_adv = images_adv.cuda()
            labels_adv = labels_adv.cuda()

        # Seed the PGD generator; iterating it yields perturbed versions
        # of the current batch (the first tuple element is unused here).
        net.pgd.set_data(images_adv, labels_adv)
        for _, images_adv2, labels2 in net.pgd:
            clean_images, clean_labels = clean_batch_iter.next()
            # Mix adversarial and clean samples in one forward/backward pass.
            final_images = torch.cat([images_adv2, clean_images], 0)
            final_label = torch.cat([labels2, clean_labels], 0)
            optimizer.zero_grad()
            outputs = net(final_images)
            loss = loss_function(outputs, final_label)
            loss.backward()
            optimizer.step()

        # NOTE(review): `loss` below is the last inner-loop value; if the PGD
        # iterator ever yields nothing this would raise NameError — confirm
        # the generator always produces at least one step.
        print('Training Epoch: {epoch} [{trained_samples}/{total_samples}]\tLoss: {:0.4f}\tLR: {:0.6f}'.format(
            loss.item(),
            optimizer.param_groups[0]['lr'],
            epoch=epoch,
            trained_samples=batch_index * args.b + len(images_adv),
            total_samples=len(cifar100_training_loader.dataset)
        ))
        wandb.log({'epoch': epoch, 'loss': loss.item(), 'LR': optimizer.param_groups[0]['lr']})

    finish = time.time()

    print('epoch {} training time consumed: {:.2f}s'.format(epoch, finish - start))


@torch.no_grad()
def eval_training(epoch=0, tb=True):
    """Evaluate the global ``net`` on the clean test set; return top-1 accuracy.

    Reads module-level globals: ``net``, ``args``,
    ``cifar100_test_loader``, ``loss_function``, ``wandb``.
    ``tb`` is kept for interface compatibility (tensorboard logging
    is currently disabled).
    """
    tic = time.time()
    net.eval()

    loss_sum = 0.0   # accumulated batch-mean cross-entropy losses
    n_correct = 0.0  # becomes a tensor after the first +=

    for images, labels in cifar100_test_loader:
        if args.gpu:
            images, labels = images.cuda(), labels.cuda()
        outputs = net(images)
        loss_sum += loss_function(outputs, labels).item()
        n_correct += outputs.max(1)[1].eq(labels).sum()

    toc = time.time()
    n_samples = len(cifar100_test_loader.dataset)
    if args.gpu:
        print('GPU INFO.....')
        print(torch.cuda.memory_summary(), end='')
    print('Evaluating Network.....')
    print('Test set: Average loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        loss_sum / n_samples,
        n_correct.float() / n_samples,
        toc - tic
    ))
    wandb.log({'epoch': epoch, 'Average loss': loss_sum / n_samples,
               'Accuracy': n_correct.float() / n_samples})
    print()

    return n_correct.float() / n_samples

def eval_adv_training(net, epoch=0, tb=True):
    """Evaluate robustness: top-1 accuracy on adversarial test examples.

    Adversarial inputs are produced per-batch by ``net.adv_generator``
    (no ``torch.no_grad`` here — the attack needs gradients). Reads
    module-level globals: ``args``, ``cifar100_test_loader``,
    ``loss_function``, ``wandb``. ``tb`` is kept for interface
    compatibility (tensorboard logging is currently disabled).
    """
    tic = time.time()
    net.eval()

    loss_sum = 0.0   # accumulated batch-mean cross-entropy losses
    n_correct = 0.0  # becomes a tensor after the first +=

    for images, labels in cifar100_test_loader:
        if args.gpu:
            images, labels = images.cuda(), labels.cuda()
        adv_images = net.adv_generator.generate(images, labels)
        # Re-assert eval mode — presumably the generator may toggle it; confirm.
        net.eval()
        outputs = net(adv_images)
        loss_sum += loss_function(outputs, labels).item()
        n_correct += outputs.max(1)[1].eq(labels).sum()

    toc = time.time()
    n_samples = len(cifar100_test_loader.dataset)
    print('Evaluating adv Network.....')
    print('Test set: Average adv loss: {:.4f}, Accuracy: {:.4f}, Time consumed:{:.2f}s'.format(
        loss_sum / n_samples,
        n_correct.float() / n_samples,
        toc - tic
    ))
    wandb.log({'epoch': epoch, 'adv Average loss': loss_sum / n_samples,
               'adv Accuracy': n_correct.float() / n_samples})
    print()

    return n_correct.float() / n_samples


if __name__ == '__main__':

    # ---- Command-line interface ----
    parser = argparse.ArgumentParser()
    parser.add_argument('-net', type=str, help='net type', default='resnet50')
    # NOTE(review): store_true with default=True means '-gpu' can never be
    # disabled from the command line — confirm whether CPU runs are needed.
    parser.add_argument('-gpu', action='store_true', default=True, help='use gpu or not')
    parser.add_argument('-b', type=int, default=64, help='batch size for dataloader')
    parser.add_argument('-warm', type=int, default=2, help='warm up training phase')
    parser.add_argument('-lr', type=float, default=0.01, help='initial learning rate')
    parser.add_argument('-resume', action='store_true', default=False, help='resume training')
    parser.add_argument('-adv_type', default='con', type=str, help='Adversarial type')
    parser.add_argument('-load_from', type=str, help='load file')
    args = parser.parse_args()

    # One wandb project per run (net name + timestamp baked into the project name).
    wandb.init(project="fintune_adv_OnlyBnLn_" + args.net + '_' + datetime.strftime(datetime.now(), "%b%d_%Y_%H_%M_%S"))
    # Pretrained backbone from timm, re-headed for CIFAR-100.
    net = timm.create_model(args.net, pretrained=True, num_classes=100)
    # "OnlyBnLn" fine-tuning: freeze everything, then re-enable gradients
    # only for BatchNorm2d / LayerNorm modules.
    net.requires_grad_(False)
    bn_layers = nn.ModuleList()
    for module in net.modules():
        if isinstance(module, nn.BatchNorm2d) or isinstance(module, nn.LayerNorm):
            bn_layers.append(module)
    bn_layers.requires_grad_(True)
    net = net.cuda() if args.gpu else net.cpu()
    # Normalization constants stashed on the model for the PGD img_transform below.
    # NOTE(review): these are moved to CUDA unconditionally even when args.gpu is
    # False — confirm CPU runs are unsupported on purpose.
    net.dataset_mean_t = torch.tensor(settings.CIFAR100_TRAIN_MEAN).view(1, -1, 1, 1).requires_grad_(False).cuda()
    net.dataset_std_t = torch.tensor(settings.CIFAR100_TRAIN_STD).view(1, -1, 1, 1).requires_grad_(False).cuda()

    # Thin wrapper so the adversarial generators use the same loss as training.
    # ("lable" is a typo for "label"; harmless, positional-only here.)
    def criterion_mlb(out, lable):
        return loss_function(out, lable)

    # PGD attack working in pixel space: img_transform provides
    # (normalize, denormalize) callables built from the stats above.
    pgd = PGD(net, img_transform=(lambda x: (x - net.dataset_mean_t) / net.dataset_std_t,
                                        lambda x: (x * net.dataset_std_t + net.dataset_mean_t)))

    # Standard L-inf budget: eps = 8/255, constant step size, 10 iterations.
    pgd.set_para(eps=8 / 255.0, alpha=lambda: 8 / 255.0, iters=10)
    # Map CLI adv_type -> (generator class, criterion).
    adv_dict = {'cls': (CLS_ADG, criterion_mlb), 'loc': (LOC_ADG, criterion_mlb), 'con': (CON_ADG, criterion_mlb),
                'mtd': (MTD, criterion_mlb),
                'cwat': (CWAT, criterion_mlb)}
    adv_item = adv_dict[args.adv_type.lower()]
    net.adv_generator = adv_item[0](pgd, adv_item[1])
    net.pgd = pgd

    # data preprocessing:
    cifar100_training_loader = get_training_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    # NOTE(review): shuffling the test loader is unusual (metrics are unaffected,
    # but per-batch logs become non-reproducible) — confirm intentional.
    cifar100_test_loader = get_test_dataloader(
        settings.CIFAR100_TRAIN_MEAN,
        settings.CIFAR100_TRAIN_STD,
        num_workers=4,
        batch_size=args.b,
        shuffle=True
    )

    loss_function = nn.CrossEntropyLoss().cuda()
    # Optimizer only sees the unfrozen (BN/LN) parameters.
    optimizer = optim.SGD(filter(lambda p: p.requires_grad, net.parameters()), lr=args.lr, momentum=0.9, weight_decay=5e-4)
    train_scheduler = optim.lr_scheduler.MultiStepLR(
        optimizer, milestones=settings.MILESTONES_ADV_FINTUNE, gamma=0.2)  # learning rate decay
    iter_per_epoch = len(cifar100_training_loader)
    # Per-batch warmup over the first `args.warm` epochs (stepped inside train()).
    warmup_scheduler = WarmUpLR(optimizer, iter_per_epoch * args.warm)

    if args.resume:
        # Resume from the most recent checkpoint folder for this net.
        recent_folder = most_recent_folder(os.path.join(settings.CHECKPOINT_PATH, args.net), fmt=settings.DATE_FORMAT)
        if not recent_folder:
            raise Exception('no recent folder were found')

        checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, recent_folder)

    else:
        checkpoint_path = os.path.join(settings.CHECKPOINT_PATH, args.net, settings.TIME_NOW)

    # use tensorboard
    if not os.path.exists(settings.LOG_DIR):
        os.mkdir(settings.LOG_DIR)

    # since tensorboard can't overwrite old values
    # so the only way is to create a new tensorboard log
    # writer = SummaryWriter(log_dir=os.path.join(
    #         settings.LOG_DIR, args.net, settings.TIME_NOW))
    # NOTE(review): input_tensor is a leftover from the commented-out
    # writer.add_graph call and is currently unused.
    input_tensor = torch.Tensor(1, 3, 32, 32).cuda()
    # writer.add_graph(net, input_tensor)

    # create checkpoint folder to save model
    if not os.path.exists(checkpoint_path):
        os.makedirs(checkpoint_path)
    # Template filled per save: e.g. resnet50-10-adv_fintune_OnlyBnLn.pth
    checkpoint_path = os.path.join(checkpoint_path, '{net}-{epoch}-{type}.pth')

    best_acccc = 0.0
    best_adv_acccc = 0.0
    if args.load_from:
        # strict=False: tolerate missing/extra keys (e.g. the attack attributes
        # stashed on the model above are not in a plain checkpoint).
        net.load_state_dict(torch.load(args.load_from), strict=False)
    if args.resume:
        # Report the accuracy of the best checkpoint, then load the most
        # recent weights to continue training from there.
        best_weights = best_acc_weights(os.path.join(settings.CHECKPOINT_PATH, args.net, recent_folder))
        if best_weights:
            weights_path = os.path.join(settings.CHECKPOINT_PATH, args.net, recent_folder, best_weights)
            print('found best acc weights file:{}'.format(weights_path))
            print('load best training file to test acc...')
            net.load_state_dict(torch.load(weights_path))
            best_acc = eval_training(tb=False)
            print('best acc is {:0.2f}'.format(best_acc))

        recent_weights_file = most_recent_weights(os.path.join(settings.CHECKPOINT_PATH, args.net, recent_folder))
        if not recent_weights_file:
            raise Exception('no recent weights file were found')
        weights_path = os.path.join(settings.CHECKPOINT_PATH, args.net, recent_folder, recent_weights_file)
        print('loading weights file {} to resume training.....'.format(weights_path))
        net.load_state_dict(torch.load(weights_path))

        resume_epoch = last_epoch(os.path.join(settings.CHECKPOINT_PATH, args.net, recent_folder))

    for epoch in range(1, settings.EPOCH_ADV_FINTUNE):
        # Step the milestone scheduler once per epoch after warmup ends.
        # NOTE(review): this runs even for epochs skipped by the resume
        # `continue` below, which keeps the LR schedule aligned with epoch
        # numbers — confirm that is the intent.
        if epoch > args.warm:
            train_scheduler.step()

        if args.resume:
            if epoch <= resume_epoch:
                continue

        train(net, epoch, args)
        # Track best clean and best adversarial accuracy separately.
        acc = eval_training(epoch)
        if best_acccc < acc:
            best_acccc = acc
        print('The best acc is {:0.4f}'.format(best_acccc))
        print()
        acc_adv = eval_adv_training(net, epoch)
        if best_adv_acccc < acc_adv:
            best_adv_acccc = acc_adv
        print('The best adv_acc is {:0.4f}'.format(best_adv_acccc))
        wandb.log({'epoch': epoch, 'best_acc': best_acccc, 'best_adv_acc': best_adv_acccc})

        # start to save best performance model after learning rate decay to 0.01
        # if epoch > settings.MILESTONES_ADV_FINTUNE[1] and best_acc < acc:
        #     torch.save(net.state_dict(), checkpoint_path.format(net=args.net, epoch=epoch, type='best'))
        #     best_acc = acc
        #     continue

        # Periodic checkpoint, plus one on the final epoch.
        if not epoch % settings.SAVE_EPOCH or epoch == settings.EPOCH_ADV_FINTUNE - 1:
            torch.save(net.state_dict(), checkpoint_path.format(net=args.net, epoch=epoch, type='adv_fintune_OnlyBnLn'))
    wandb.finish()
