# -*- coding: utf-8 -*-
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
import os
import numpy as np

from models.vgg import vgg
from config import config as args

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')


def test(model, criterion, test_loader):
    # print(model)
    model.eval()
    test_loss = 0.
    correct = 0.
    for data, target in test_loader:
        data, target = data.to(device), target.to(device)
        data, target = Variable(data), Variable(target)

        with torch.no_grad():
            output = model(data)
        test_loss += criterion(output, target).item()
        pred = output.data.max(1, keepdim=True)[1]
        correct += pred.eq(target.data.view_as(pred)).cpu().sum()

    test_loss /= len(test_loader.dataset)
    print('Test set: Average loss: {:.4f}, Accuracy: {}/{} ({:.1f}%)'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))

if __name__ == '__main__':
    # Directory where the pruned checkpoint is saved.  makedirs(exist_ok=True)
    # replaces the racy exists()+mkdir pair and also handles nested paths.
    os.makedirs(args.checkout, exist_ok=True)

    # CIFAR-10 evaluation set with the standard per-channel normalization.
    test_transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010))
    ])
    test_set = datasets.CIFAR10("data", train=False, download=True, transform=test_transform)
    # drop_last=False: test() divides by len(dataset), so dropping a trailing
    # partial batch (the original drop_last=True) would silently deflate both
    # the average loss and the accuracy.
    test_loader = DataLoader(test_set, batch_size=args.batch_size, num_workers=args.num_workers,
                             shuffle=False, drop_last=False, pin_memory=True)

    # Unpruned reference model.
    model = vgg().to(device)  # cpu -> gpu

    # Loss.
    criterion = nn.CrossEntropyLoss()

    # Load trained weights, if a checkpoint path is configured.
    if args.best_model != "":
        print("=> loading best checkout '{}'".format(args.best_model))
        # map_location lets a checkpoint saved on GPU load on a CPU-only host.
        checkout = torch.load(args.best_model, map_location=device)
        model.load_state_dict(checkout)

    # Baseline accuracy before pruning.
    test(model, criterion, test_loader)

    # Target architecture after L1-norm channel pruning.  Original VGG cfg:
    #     [64, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 512, 512, 512, 512, 'M', 512, 512, 512, 512]
    cfg = [32, 64, 'M', 128, 128, 'M', 256, 256, 256, 256, 'M', 256, 256, 256, 256, 'M', 256, 256, 256, 256]

    # For every conv layer, build a 0/1 mask over its output channels: keep
    # the cfg[layer_id] filters with the largest L1 norm, drop the rest.
    cfg_mask = []
    layer_id = 0
    for m in model.modules():
        if isinstance(m, nn.Conv2d):
            out_channels = m.weight.data.shape[0]
            if out_channels == cfg[layer_id]:
                # Layer already has the target width -- keep every filter.
                cfg_mask.append(torch.ones(out_channels))
                layer_id += 1
                continue
            weight_copy = m.weight.data.abs().clone()
            weight_copy = weight_copy.cpu().numpy()
            L1_norm = np.sum(weight_copy, axis=(1, 2, 3))  # per-filter L1 norm
            arg_max = np.argsort(L1_norm)  # ascending by importance
            arg_max_rev = arg_max[::-1][:cfg[layer_id]]  # strongest cfg[layer_id] filters
            assert arg_max_rev.size == cfg[layer_id]
            mask = torch.zeros(out_channels)
            mask[arg_max_rev.tolist()] = 1
            cfg_mask.append(mask)
            layer_id += 1
        elif isinstance(m, nn.MaxPool2d):
            layer_id += 1  # 'M' entries in cfg also consume one slot

    # Build the slimmer model and copy the surviving weights into it.
    newmodel = vgg(cfg=cfg)
    newmodel.to(device)

    start_mask = torch.ones(3)  # network input: 3 RGB channels, all kept
    layer_id_in_cfg = 0
    end_mask = cfg_mask[layer_id_in_cfg]
    for m0, m1 in zip(model.modules(), newmodel.modules()):
        if isinstance(m0, nn.BatchNorm2d):
            idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
            if idx1.size == 1:
                idx1 = np.resize(idx1, (1,))  # keep 1-element index arrays 1-D
            m1.weight.data = m0.weight.data[idx1.tolist()].clone()
            m1.bias.data = m0.bias.data[idx1.tolist()].clone()
            m1.running_mean = m0.running_mean[idx1.tolist()].clone()
            m1.running_var = m0.running_var[idx1.tolist()].clone()
            # A BatchNorm closes one conv block: advance to the next mask pair.
            layer_id_in_cfg += 1
            start_mask = end_mask
            if layer_id_in_cfg < len(cfg_mask):  # the classifier has no mask
                end_mask = cfg_mask[layer_id_in_cfg]
        elif isinstance(m0, nn.Conv2d):
            # Select surviving input channels (idx0) and output filters (idx1).
            idx0 = np.squeeze(np.argwhere(np.asarray(start_mask.cpu().numpy())))
            idx1 = np.squeeze(np.argwhere(np.asarray(end_mask.cpu().numpy())))
            print("In shape: {:d}, Out shape {:d}".format(idx0.size, idx1.size))
            if idx0.size == 1:
                idx0 = np.resize(idx0, (1,))
            if idx1.size == 1:
                idx1 = np.resize(idx1, (1,))
            w1 = m0.weight.data[:, idx0.tolist(), :, :].clone()
            w1 = w1[idx1.tolist(), :, :, :].clone()
            m1.weight.data = w1.clone()
        elif isinstance(m0, nn.Linear):
            if layer_id_in_cfg == len(cfg_mask):
                # First fully-connected layer: restrict its input features to
                # the channels kept by the last conv mask.
                # NOTE(review): only the FIRST Linear is copied; any further
                # nn.Linear layers keep their fresh random init.  This is
                # correct only if vgg() has a single Linear -- verify against
                # models/vgg.py.
                idx0 = np.squeeze(np.argwhere(np.asarray(cfg_mask[-1].cpu().numpy())))
                if idx0.size == 1:
                    idx0 = np.resize(idx0, (1,))
                m1.weight.data = m0.weight.data[:, idx0].clone()
                m1.bias.data = m0.bias.data.clone()
                layer_id_in_cfg += 1
                continue
        elif isinstance(m0, nn.BatchNorm1d):
            # 1-D BatchNorm sits in the unpruned classifier: copy verbatim.
            m1.weight.data = m0.weight.data.clone()
            m1.bias.data = m0.bias.data.clone()
            m1.running_mean = m0.running_mean.clone()
            m1.running_var = m0.running_var.clone()

    # Accuracy of the pruned model (before any fine-tuning).
    test(newmodel, criterion, test_loader)

    # Save architecture + weights of the pruned network.
    checkout = {"cfg": cfg, "checkout": newmodel.state_dict()}
    torch.save(checkout, args.prune_model)










