import shutil
import time
import os

import matplotlib
import matplotlib.pyplot as plt
import numpy as np

import torch
import torchvision.transforms as transforms
import torchvision

from models.vgg import VGG

configs = {
    'EVAL_ALL': False,  # if False, only evaluate the model after pruning PRUNE_K channels; if True, evaluate after pruning every k in [0, PRUNE_K]
    'PRUNE_K': 56,  # valid range: [0, 512]
    'SAVE_RESULTS': False,  # whether to append the accuracy numbers to files under ./results/
}

# Restrict the process to the first GPU.
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
# CIFAR-10 class names, index-aligned with the dataset's integer labels.
classes = ("airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck")


def pruneModel(modelLayer, k, channel_indices):
    """
    Prune the k-th layer by zeroing the selected output channels in place.

    The target layer is expected to be a Conv2d-like module whose ``weight``
    has shape [out_channels, in_channels, kH, kW] and whose ``bias`` has
    shape [out_channels] (e.g. the layers of VGG19's feature extractor,
    a Sequential of Conv2d/BatchNorm2d/ReLU/MaxPool2d blocks).

    Args:
        modelLayer: sequential container holding all feature-extractor layers
        k: index of the layer to prune
        channel_indices: torch.Tensor of output-channel indices to zero out
    """
    with torch.no_grad():
        layer = modelLayer[k]
        # Plain Python ints so they can be used as an integer-array index.
        pruned = [int(channel_indices[idx]) for idx in range(channel_indices.shape[0])]
        # NOTE: the original code shadowed the parameter `k` with the
        # named_parameters() loop variable; use distinct names instead.
        for name, param in layer.named_parameters():
            if name == 'bias':      # param.shape = [out_channels]
                param[pruned] = 0.0
            elif name == 'weight':  # param.shape = [out_channels, in_channels, kH, kW]
                # One vectorized index assignment instead of a per-channel loop.
                param[pruned] = 0.0


def checkPruneModel(modelLayer, k, channel_indices):
    """
    Print the parameters of the pruned channels of layer *k* so that the
    zeroing can be verified by eye.  Takes the same arguments as
    ``pruneModel``.
    """
    with torch.no_grad():
        target = modelLayer[k]
        pruned = [int(channel_indices[i]) for i in range(channel_indices.shape[0])]
        for name, tensor in target.named_parameters():
            for ch in pruned:
                if name == 'bias':
                    print(tensor[ch])
                if name == 'weight':  # tensor.shape = [out_channels, in_channels, kH, kW]
                    print(tensor[ch, :, :, :])


def showFeatureMap(featureMap):
    """
    Plot every channel of a feature map on a square grid of subplots,
    save the figure to ./images/toImg.png, and show it.

    Args:
        featureMap: torch.Tensor, assumed shape [1, C, H, W] — the leading
            batch dimension is squeezed away here.  Moved to CPU for plotting.
    """
    featureMap = featureMap.squeeze(0)
    featureMapNum = featureMap.shape[0]

    # Smallest square grid that fits all channels.
    row_num = int(np.ceil(np.sqrt(featureMapNum)))

    plt.figure()
    for idx in range(1, featureMapNum + 1):
        plt.subplot(row_num, row_num, idx)
        plt.imshow(featureMap[idx - 1].cpu(), cmap='gray')
        plt.axis('off')
    # plt.colorbar()  # per-subplot colorbars are awkward in matplotlib; omitted
    # savefig raises FileNotFoundError if the target directory is missing.
    os.makedirs("./images", exist_ok=True)
    plt.savefig("./images/toImg.png")
    plt.show()


def calculateFeatureMap(featureMaps):
    """
    Average a batch of feature maps over the batch dimension.

    Args:
        featureMaps: torch.Tensor of shape [batch_size, C, H, W]
            (here [batch_size, 512, 2, 2])

    Returns:
        torch.Tensor of shape [C, H, W] — the per-channel mean over the batch.
    """
    # The original per-channel slice/mean/stack loop is exactly a mean
    # over dim 0; do it as a single vectorized reduction.
    return torch.mean(featureMaps, dim=0)


def getKLayerFeatureMap(modelLayer, k, inputs):
    """
    Feed *inputs* through the layers in order and return the activation
    produced by the layer at index *k* (implicitly None if *k* is out of
    range).
    """
    with torch.no_grad():
        activation = inputs
        for position, module in enumerate(modelLayer):
            activation = module(activation)
            if position == k:
                return activation


def pred(model, test_loader, device, k):
    """
    Evaluate *model* on *test_loader*, print overall and per-class accuracy,
    and optionally append the numbers to ./results/*.txt.

    Args:
        model: the (possibly pruned) network, already in eval mode
        test_loader: DataLoader over the CIFAR-10 test set
        device: 'cuda' or 'cpu'
        k: number of pruned channels — used only in the report header
    """
    N_CLASSES = len(classes)
    class_correct = [0. for i in range(N_CLASSES)]
    class_total = [0. for i in range(N_CLASSES)]
    correct = 0
    total = 0
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs, targets = inputs.to(device), targets.to(device)
            outputs = model(inputs)
            _, predicted = outputs.max(1)
            total += targets.size(0)
            correct += predicted.eq(targets).sum().item()

            # No .squeeze() here: squeezing made the mask 0-dim for a
            # batch of size 1, breaking the indexing below.
            hits = predicted.eq(targets)
            for i in range(len(targets)):
                label = targets[i]
                class_correct[label] += hits[i].item()
                class_total[label] += 1

    print('\n=================== 剪枝 {} ==================='.format(k))  # k = number of pruned channels
    print("Acc: {:.2f}".format(100. * correct / total))
    if configs['SAVE_RESULTS']:
        with open("./results/total.txt", 'a') as f:
            f.write(str(100. * correct / total))
            f.write('\n')

    for i in range(N_CLASSES):
        # Guard against a class that never appears in the loader.
        acc_i = 100. * class_correct[i] / class_total[i] if class_total[i] else 0.0
        print("Acc of {}: {:.2f}".format(classes[i], acc_i))
        if configs['SAVE_RESULTS']:
            with open("./results/{}.txt".format(classes[i]), 'a') as f:
                f.write(str(acc_i))
                f.write('\n')
    print('==============================================\n')


def main():
    """
    Load the trained VGG19 checkpoint, compute the mean feature map of layer
    ``KthLayer`` over the whole CIFAR-10 test set, visualize it, then prune
    the channels with the weakest mean activation and report test accuracy.
    """
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    # map_location keeps a GPU-trained checkpoint loadable on a CPU-only machine.
    state = torch.load('./ckpt/best.pth', map_location=device)
    net_params = state['net']
    # acc = state['acc']
    # epoch = state['epoch']
    # print(f'acc: {acc}, epoch: {epoch}')
    model = VGG('VGG19')
    model.to(device)
    model.load_state_dict(net_params)
    model.eval()

    transform_test = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
    ])

    test_dataset = torchvision.datasets.CIFAR10(
        root='./data', train=False, download=True, transform=transform_test)
    test_loader = torch.utils.data.DataLoader(
        test_dataset, batch_size=5000, shuffle=False, num_workers=2)

    # Baseline (unpruned) evaluation:
    # pred(model, test_loader, device)

    modelLayer = list(model.children())[0]  # the convolutional feature extractor
    KthLayer = 49  # index of the Conv2d layer to analyze and prune
    meanMapInSameDimList = []
    with torch.no_grad():
        for inputs, targets in test_loader:
            inputs = inputs.to(device)
            feature_maps = getKLayerFeatureMap(modelLayer, KthLayer, inputs)
            meanMapInSameDimList.append(calculateFeatureMap(feature_maps))

        # Average the per-batch means into one [512, H, W] map.
        mean_feature_maps = torch.mean(torch.stack(meanMapInSameDimList), dim=0)
        showFeatureMap(mean_feature_maps)

        results_dir = os.path.join(os.getcwd(), "results")
        if configs['SAVE_RESULTS'] and os.path.exists(results_dir):
            shutil.rmtree("./results")  # start from a clean results folder
        if not os.path.exists(results_dir):
            os.mkdir(results_dir)

        def weakest_channels(num):
            # channel_indices: the `num` channels with the smallest mean
            # activation; negate so topk picks the minima.
            channel_means = torch.mean(mean_feature_maps.reshape((512, -1)), dim=-1)
            return torch.topk(channel_means * -1, k=num)[1]

        if not configs['EVAL_ALL']:
            # Prune exactly PRUNE_K channels and evaluate once.
            pruneModel(modelLayer, KthLayer, weakest_channels(configs['PRUNE_K']))
            pred(model, test_loader, device, configs['PRUNE_K'])
        else:
            # Evaluate after pruning 0, 1, ..., PRUNE_K channels.
            for k in range(configs['PRUNE_K'] + 1):
                pruneModel(modelLayer, KthLayer, weakest_channels(k))
                pred(model, test_loader, device, k)
        # ================== verify that pruning actually happened ==============
        # modelLayer = list(model.children())[0]
        # checkPruneModel(modelLayer, KthLayer, weakest_channels(configs['PRUNE_K']))


if __name__ == '__main__':
    started_at = time.time()
    main()
    finished_at = time.time()
    print("Total running time: {:.4f}s".format(finished_at - started_at))  # in seconds
