# -*- coding: utf-8 -*-

"""
Created on 10/27/2022
预训练之后的权重高值比较实验
@author: Kang Xiatao (kangxiatao@gmail.com)
"""

import torch

from models.model_base import ModelBase
from models.base.init_utils import weights_init
from configs import *
from utils.network_utils import get_network
from utils.data_utils import get_dataloader
from pruner.pruning import *
from train_test import *
import random


def init_seed(seed):
    """Seed every RNG source (Python, NumPy, PyTorch CPU and all GPUs) for reproducibility.

    Args:
        seed: integer seed applied to all generators.
    """
    # NOTE(review): cudnn determinism toggles intentionally left off — enabling
    # them hurts throughput. Uncomment for bitwise-reproducible runs:
    # torch.backends.cudnn.deterministic = True
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    # Seed every visible GPU, not just the current device (safe on CPU-only hosts).
    torch.cuda.manual_seed_all(seed)


def rank_weight(w, ratio=0.2):
    """Return the threshold separating the top ``ratio`` fraction of values.

    Args:
        w: a tensor, or a dict mapping layer index -> tensor (all values are
           pooled together in the dict case).
        ratio: fraction of entries treated as "high" (default 0.2).

    Returns:
        0-dim tensor: the smallest value among the top ``ratio`` entries, i.e.
        ``x >= rank_weight(x)`` keeps roughly the top ``ratio`` of ``x``.
    """
    if isinstance(w, dict):
        all_scores = torch.cat([torch.flatten(x) for x in w.values()])
    else:
        all_scores = torch.flatten(w)

    # Clamp to at least 1: for tiny tensors / small ratios int() truncates to 0,
    # and topk(k=0) returns an empty tensor, making threshold[-1] raise IndexError.
    _top_n = max(int(all_scores.numel() * ratio), 1)
    threshold, _index = torch.topk(all_scores, _top_n)

    return threshold[-1]


def print_ratio_layer(mark):
    """Print the kept-weight percentage (entries == 1) per layer, then overall.

    Args:
        mark: dict mapping layer id -> binary mask tensor.
    """
    total_elems = 0
    kept_elems = 0
    for layer_id, layer_mask in mark.items():
        layer_pct = (layer_mask.sum() / layer_mask.numel()).item() * 100.0
        total_elems += layer_mask.numel()
        kept_elems += layer_mask.sum().item()
        print(layer_id, layer_pct)
    print('all', kept_elems / total_elems * 100.0)


def print_coincide(mask1, mask2):
    """Report per-layer and overall overlap between two binary mask dicts.

    Args:
        mask1, mask2: dicts with identical keys mapping layer id -> 0/1 tensor.

    Returns:
        Tuple ``(coincide, only1, only2)``: element-wise AND of the two masks,
        plus the entries unique to ``mask1`` and to ``mask2`` respectively.
    """
    coincide = dict()
    only1, only2 = dict(), dict()
    elem_total = 0
    overlap_total = 0
    for layer_no, key in enumerate(mask1, start=1):
        m1, m2 = mask1[key], mask2[key]
        both = m1 * m2
        coincide[key] = both
        only1[key] = m1 - both
        only2[key] = m2 - both
        n_both = torch.sum(both == 1).item()
        n_all = m1.numel()
        overlap_total += n_both
        elem_total += n_all
        print(f'{layer_no:2d}: {n_both / n_all * 100.0:.2f}%')
    print(f'总重合度: {overlap_total/elem_total * 100.0:.2f}%')
    return coincide, only1, only2


def temp_train(net, dataset, coin_mark, miss_mark, linde, mode=3):
    """Fine-tune ``net`` with a custom mask-aware regularizer and report the best accuracy.

    Args:
        net: model to train; inputs are moved to GPU, so the model must be on GPU too.
        dataset: dataset name passed to ``get_dataloader``.
        coin_mark: per-layer masks of weights shared by both pretrained nets.
        miss_mark: per-layer masks of weights unique to this net.
        linde: list of conv-layer indices forwarded to ``Regularization``.
        mode: regularization mode flag (semantics defined by ``Regularization``
              in the pruner package — TODO confirm).

    Returns:
        Summary string ``'best acc: %.4f, epoch: %d\\n'``.
    """

    # - load the dataset (batch size 256 for train and test, 4 workers)
    trainloader, testloader = get_dataloader(dataset, 256, 256, 4, root='../Data')

    # - training hyper-parameters
    learning_rate = 0.01
    weight_decay = 0.0005
    num_epochs = 50
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=learning_rate, momentum=0.9, weight_decay=weight_decay, nesterov=True)

    # Step schedule: drop the LR by 10x at the halfway point.
    lr_schedule = {0: learning_rate,
                   int(num_epochs * 0.5): learning_rate * 0.1}
    lr_scheduler = PresetLRScheduler(lr_schedule)

    # Use the custom regularization instead of the optimizer's built-in weight
    # decay (disabled below) — presumably so decay can be modulated per-weight
    # by the coincide/miss masks; verify against pruner.pruning.Regularization.
    reg = Regularization(weight_decay, mode, coin_mark, miss_mark, linde).cuda()
    optimizer.param_groups[-1]['weight_decay'] = 0

    best_acc = 0
    best_epoch = 0
    for epoch in range(num_epochs):

        # train
        print('\nEpoch: %d' % epoch)
        net.train()
        train_loss = 0
        correct = 0
        total = 0
        for batch_idx, (inputs, targets) in enumerate(trainloader):
            inputs, targets = inputs.cuda(), targets.cuda()
            optimizer.zero_grad()
            outputs = net(inputs)
            # cross-entropy plus the custom penalty term
            loss = criterion(outputs, targets) + reg(net)
            loss.backward()
            optimizer.step()
            train_loss += loss.item()
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()
        # NOTE: batch_idx deliberately leaks out of the loop to average the loss.
        print('- Loss: %.3f | Acc: %.3f%% (%d/%d)' % (train_loss / (batch_idx + 1), 100. * correct / total, correct, total))

        # test; epochs 0-5 are excluded from "best" to skip warm-up noise
        test_acc = test(net, testloader, criterion, epoch, None)
        if test_acc > best_acc and epoch > 5:
            best_acc = test_acc
            best_epoch = epoch

        lr_scheduler(optimizer, epoch)

    return 'best acc: %.4f, epoch: %d\n' % (best_acc, best_epoch)


def _kernel_mean_abs(net):
    """Per-conv-layer kernel-averaged |weights| for ``net``.

    For every Conv2d weight, each k1 x k2 kernel is replaced by its mean value
    (broadcast back to kernel shape), then abs() is taken, so all entries of a
    kernel share one magnitude score.

    Args:
        net: a module whose Conv2d layers are scored.

    Returns:
        dict mapping ``module_index - 2`` -> scored tensor, same shape as the
        layer weight. (The -2 offset matches named_parameters indexing per the
        original author's note — TODO confirm it holds for non-VGG nets.)
    """
    scores = dict()
    for idx, layer in enumerate(net.modules()):
        if isinstance(layer, nn.Conv2d):
            x = layer.weight.data
            k1, k2 = x.shape[2], x.shape[3]
            x = torch.sum(x, dim=(2, 3), keepdim=True)
            x = x.repeat(1, 1, k1, k2)
            x = torch.div(x, k1 * k2)
            scores[idx - 2] = x.abs()
            # (alternative experiment: average per filter instead, i.e. reduce
            # over dims (0, 2, 3) and divide by n * k1 * k2)
    return scores


def main():
    """High-weight overlap experiment between two pretrained VGG19 nets.

    Loads VGG19 checkpoints trained on CIFAR-10 and CIFAR-100, marks the
    top-magnitude weights of each (per layer), prints how much the two marks
    coincide, then retrains the CIFAR-10 net with an overlap-aware regularizer.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--p1', type=str, default='runs/pretrain/vgg19_cifar10_pre_train80/train_cifar10_vgg19_prune0_pre_ep80_dense_best.pth.tar',
                        help='path - cifar100_vgg19_anoi_dense_best.pth.tar')
    parser.add_argument('--p2', type=str, default='runs/pretrain/vgg19_cifar100/train_cifar100_vgg19_prune0_anoi_dense_best.pth.tar',
                        help='path - cifar100_vgg19_anoi_dense_best.pth.tar')
    parser.add_argument('--mode', type=int, default=3)
    args = parser.parse_args()

    # ===== build/load model =====
    vgg19_cifar10 = get_network('vgg', 19, 'cifar10', use_bn=True)
    vgg19_cifar10.apply(weights_init)
    vgg19_cifar100 = get_network('vgg', 19, 'cifar100', use_bn=True)
    vgg19_cifar100.apply(weights_init)

    # strict=False: checkpoints may have missing/extra keys (e.g. the classifier head)
    pretrained_dict = torch.load(args.p1)['net'].state_dict()
    vgg19_cifar10.load_state_dict(pretrained_dict, strict=False)

    pretrained_dict = torch.load(args.p2)['net'].state_dict()
    vgg19_cifar100.load_state_dict(pretrained_dict, strict=False)
    print('load model finish')

    vgg19_cifar10.cuda()
    vgg19_cifar100.cuda()

    # - kernel-averaged |weights| per conv layer of each net
    w1 = _kernel_mean_abs(vgg19_cifar10)
    w2 = _kernel_mean_abs(vgg19_cifar100)

    # - per-layer ratio: mark the top fraction of each layer independently
    #   (alternative: a single global threshold via rank_weight(w1) / rank_weight(w2))
    mark1 = {i: (_w >= rank_weight(_w)).float() for i, _w in w1.items()}
    mark2 = {i: (_w >= rank_weight(_w)).float() for i, _w in w2.items()}

    # - overlap analysis between the two marks
    coin_m, miss_m1, miss_m2 = print_coincide(mark1, mark2)

    # - conv-layer indices (aligned with named_parameters numbering)
    _ind_layer = list(w1.keys())

    # - retrain with adjusted penalties
    print(temp_train(vgg19_cifar10, 'cifar10', coin_m, miss_m1, _ind_layer, args.mode))
    # print(temp_train(vgg19_cifar100, 'cifar100', coin_m, miss_m2, _ind_layer))


# Script entry point: run the full overlap experiment.
if __name__ == '__main__':
    main()
