import torch
from copy import deepcopy
import numpy as np
import os

import torch.nn.functional as F


# YOLOv2's network structure is fairly simple, so it leaves a lot of room for pruning,
# but its object-classification ability is weak.
def GetModelPruneDefs(model_cfg):
    """
    Split the conv layers of a parsed darknet cfg into BN / no-BN groups and
    select the prunable ones.

    :param model_cfg: list of section dicts; element 0 is the [net] header,
        so layer indices are shifted down by one.
    :return: (CBL_idx, Conv_idx, prune_idx) — conv-with-BN indices,
        plain-conv indices, and the BN convs actually eligible for pruning.
    """
    cbl_layers = []
    plain_convs = []
    skipped = set()
    for raw_idx, block in enumerate(model_cfg):
        layer_idx = raw_idx - 1  # shift past the leading [net] section
        if block['type'] != 'convolutional':
            continue
        if block['batch_normalize'] == "1":
            cbl_layers.append(layer_idx)
        else:
            plain_convs.append(layer_idx)
        # NOTE(review): after the index shift, model_cfg[layer_idx + 1] is this
        # very block, so this maxpool test can never fire — it looks like dead
        # code; the hard-coded ignores below may be compensating. Confirm intent.
        if model_cfg[layer_idx + 1]['type'] == 'maxpool':
            skipped.add(layer_idx)
    # Hand-picked exclusions (route/reorg-coupled convs in this YOLOv2 cfg).
    skipped.update((16, 24, 28))

    prunable = [idx for idx in cbl_layers if idx not in skipped]
    return cbl_layers, plain_convs, prunable


# Gather the BN layers' per-channel scale weights; used for plotting histograms.
def GatherBnWeights(module_list, prune_idx):
    """
    Flatten the absolute BN scale factors of all prunable layers into one
    1-D tensor (used e.g. to pick a global pruning threshold / histogram).

    :param module_list: sequence where module_list[idx][1] is a BatchNorm layer.
    :param prune_idx: indices of the prunable conv+BN blocks.
    :return: 1-D tensor of |gamma| values, concatenated in prune_idx order.
    """
    per_layer = [module_list[idx][1].weight.data.abs().clone() for idx in prune_idx]
    # torch.cat rejects an empty list; mirror the original's empty result.
    return torch.cat(per_layer) if per_layer else torch.zeros(0)


class BnOpt():
    """Sparsity-training helper that pushes BN scale factors toward zero."""

    @staticmethod
    def UpdateBn(sr_flag, module_list, s, prune_idx, epoch):
        """
        Add an L1 sub-gradient penalty `s * sign(gamma)` to the BN-scale
        gradients of every prunable layer (network-slimming style sparsity).

        :param sr_flag: when falsy this is a no-op.
        :param module_list: sequence where module_list[idx][1] is the BN layer.
        :param s: penalty strength; could be scheduled (larger early, smaller
            late) — the `epoch` parameter is kept for that purpose.
        :param prune_idx: indices of layers whose BN scales are penalized.
        :param epoch: currently unused; see above.
        """
        if not sr_flag:
            return
        # Bug fix: the original computed a sorted copy and median of every
        # BN weight tensor per call and never used them — dead work removed.
        bn_module = None
        for idx in prune_idx:
            bn_module = module_list[idx][1]
            # NOTE(review): the original comments observe that this
            # checkpoint's scales are all positive, in which case sign()
            # is simply +1 everywhere.
            bn_module.weight.grad.data.add_(s * torch.sign(bn_module.weight.data))
        # Bug fix: the original printed unconditionally and would raise
        # NameError when prune_idx is empty.
        if bn_module is not None:
            print("权重均值为：", torch.mean(bn_module.weight.data))


def GetInputConvMask(module_defs, idx, CBLidx2mask):
    '''
    Build the input-channel keep-mask for conv layer `idx` from the output
    masks of the layer(s) feeding it.

    :param module_defs: list of layer definition dicts (with the leading
        [net] section already removed), each having at least a 'type' key.
    :param idx: index of the conv layer whose input mask is wanted.
    :param CBLidx2mask: dict mapping conv-layer index -> 0/1 output-channel
        mask (numpy array).
    :return: mask of the conv's input channels. NOTE(review): falls through
        and implicitly returns None when the preceding layer is neither
        'convolutional' nor 'route', or a route has more than two inputs —
        confirm callers never hit those cases.
    '''
    if idx == 0:
        # The very first conv reads the raw 3-channel image: keep all three.
        return np.ones(3)
    if module_defs[idx - 1]['type'] == 'convolutional':
        return CBLidx2mask[idx - 1]
    elif module_defs[idx - 1]['type'] == 'route':
        route_in_idxs = []
        for layer_i in module_defs[idx - 1]['layers'].split(","):
            if int(layer_i) < 0:
                # Negative entries are relative to the route layer itself.
                route_in_idxs.append(idx - 1 + int(layer_i))
            else:
                # Absolute index (for the second route, its input is a reorg).
                route_in_idxs.append(int(layer_i))

        if len(route_in_idxs) == 1:
            return CBLidx2mask[route_in_idxs[0]]
        elif len(route_in_idxs) == 2:
            # Two-input route: concatenate both source masks. Convs coupled
            # to the reorg are excluded from pruning (e.g. the layer-16 conv).
            if module_defs[route_in_idxs[0]]['type'] == 'reorg':
                # Walk back to find the conv whose output the reorg rearranges.
                # NOTE(review): assumes module_defs[route_in_idxs[0]-1] is a
                # 'route' section with a 'layers' key — confirm against the cfg.
                nead_add_layer= module_defs[route_in_idxs[0]-1]['layers'].split(',')[0]
                idxs=route_in_idxs[0]+int(nead_add_layer)-1

                # reorg(stride 2) packs 2x2 spatial blocks into channels,
                # multiplying the channel count by 4, so expand the mask 4x.
                # NOTE(review): numpy repeat is element-wise
                # ([a, b] -> [a, a, a, a, b, b, b, b]); verify that ordering
                # matches reorg's actual channel interleaving.
                mask1=CBLidx2mask[idxs]
                mask1=mask1.repeat(4)
            else:
                mask1 = CBLidx2mask[route_in_idxs[0] - 1]
            if module_defs[route_in_idxs[1]]['type'] == 'convolutional':
                mask2 = CBLidx2mask[route_in_idxs[1]]
            else:
                mask2 = CBLidx2mask[route_in_idxs[1] - 1]
            return np.concatenate([mask1, mask2])


def InitWeightsFromLooseModel(compact_model, loss_model, CBL_idx, Conv_idx, CBLidx2mask):
    """
    Copy the surviving channels' parameters from the loose (unpruned) model
    into the freshly built compact (pruned) model.

    :param compact_model: pruned network whose layers are sized to the masks.
    :param loss_model: original (loose) network holding the trained weights.
    :param CBL_idx: indices of conv+BN blocks.
    :param Conv_idx: indices of plain conv blocks (no BN — the output heads).
    :param CBLidx2mask: conv index -> 0/1 output-channel keep mask.
    """
    cfg_blocks = loss_model.blocks.copy()
    cfg_blocks.pop(0)  # drop [net] so block indices align with model indices

    for idx in CBL_idx:
        keep_out = np.argwhere(CBLidx2mask[idx])[:, 0].tolist()
        keep_in = np.argwhere(GetInputConvMask(cfg_blocks, idx, CBLidx2mask))[:, 0].tolist()
        new_block = compact_model.models[idx]
        old_block = loss_model.models[idx]

        # BatchNorm: keep only the surviving output channels' affine
        # parameters and running statistics.
        new_bn, old_bn = new_block[1], old_block[1]
        new_bn.weight.data = old_bn.weight.data[keep_out].clone()
        new_bn.bias.data = old_bn.bias.data[keep_out].clone()
        new_bn.running_mean.data = old_bn.running_mean.data[keep_out].clone()
        new_bn.running_var.data = old_bn.running_var.data[keep_out].clone()

        # Conv: slice away pruned input channels first, then output channels.
        new_conv, old_conv = new_block[0], old_block[0]
        sliced = old_conv.weight.data[:, keep_in, :, :].clone()
        new_conv.weight.data = sliced[keep_out, :, :, :].clone()

    for idx in Conv_idx:
        # Final feature-output convs carry no BN and keep every output
        # channel; only their input channels shrink.
        new_conv = compact_model.models[idx][0]
        old_conv = loss_model.models[idx][0]
        keep_in = np.argwhere(GetInputConvMask(cfg_blocks, idx, CBLidx2mask))[:, 0].tolist()
        new_conv.weight.data = old_conv.weight.data[:, keep_in, :, :].clone()
        new_conv.bias.data = old_conv.bias.data.clone()


def write_cfg(cfg_file, module_defs):
    """
    Write a darknet-style .cfg file from a list of module definition dicts.

    :param cfg_file: destination path.
    :param module_defs: list of dicts, each with a 'type' key plus options.
    :return: the destination path, unchanged.
    """
    with open(cfg_file, 'w') as f:
        # Bug fix: the original reused `module_defs` as the loop variable,
        # shadowing the parameter.
        for module_def in module_defs:
            f.write(f"[{module_def['type']}]\n")
            for key, value in module_def.items():
                # batch_normalize=0 is darknet's default, so omit it. The
                # rest of this file stores the flag as a string ("1"/"0"),
                # so accept both the int and string form here.
                if key == 'batch_normalize' and value in (0, '0'):
                    continue
                if key != 'type':
                    f.write(f'{key}={value}\n')
            f.write('\n')
    return cfg_file


def SaveWeights(model, path, cutoff=-1):
    """
    Load darknet-format weights from `path` into `model`.

    NOTE(review): despite its name this function READS a .weights file and
    copies the values into the model's layers. The original opened the file
    in 'wb' mode — which truncated it and made `weights` empty — fixed to
    'rb'. The name is kept for caller compatibility.

    :param model: network exposing parallel `blocks` (cfg dicts) and
        `models` (torch modules) sequences.
    :param path: darknet .weights file to read (raw float32 stream; any
        version header is assumed to be handled by the caller).
    :param cutoff: slice bound on the layers to load (-1 = all but the last).
    :return: cutoff.
    """
    with open(path, 'rb') as f:
        weights = np.fromfile(f, dtype=np.float32)
    ptr = 0
    for i, (mdef, module) in enumerate(zip(model.blocks[:cutoff], model.models[:cutoff])):
        if mdef['type'] == 'convolutional':
            conv_layer = module[0]
            # The flag is stored as "1"/"0" elsewhere in this file; compare
            # the string form ('0' would be truthy). Int 1/0 also works.
            if str(mdef['batch_normalize']) == '1':
                bn_layer = module[1]
                num_b = bn_layer.bias.numel()  # per-tensor BN parameter count
                # Bias (beta)
                bn_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.bias)
                bn_layer.bias.data.copy_(bn_b)
                ptr += num_b
                # Weight (gamma)
                bn_w = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.weight)
                bn_layer.weight.data.copy_(bn_w)
                ptr += num_b
                # Running mean
                bn_rm = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_mean)
                bn_layer.running_mean.data.copy_(bn_rm)
                ptr += num_b
                # Running var
                bn_rv = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(bn_layer.running_var)
                bn_layer.running_var.data.copy_(bn_rv)
                ptr += num_b
                # Conv weights
                num_w = conv_layer.weight.numel()
                conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)
                conv_layer.weight.data.copy_(conv_w)
                ptr += num_w
            else:
                # Convs without BN carry a bias in the darknet format.
                num_b = conv_layer.bias.numel()
                conv_b = torch.from_numpy(weights[ptr:ptr + num_b]).view_as(conv_layer.bias)
                conv_layer.bias.data.copy_(conv_b)
                ptr += num_b
                # Conv weights
                num_w = conv_layer.weight.numel()
                conv_w = torch.from_numpy(weights[ptr:ptr + num_w]).view_as(conv_layer.weight)
                conv_layer.weight.data.copy_(conv_w)
                ptr += num_w
    # Bug fix: the assert and return were indented inside the loop, so the
    # function returned after the very first module. The whole file must be
    # consumed exactly.
    assert ptr == len(weights)
    return cutoff


def SavePruneRes(hyperparams, model, cfgpath, weightpath, percent):
    """
    Save the pruned model's cfg and weights next to the originals, prefixing
    the file names with 'prune_{percent}_'.

    :param hyperparams: unused; kept for caller compatibility.
    :param model: pruned network exposing `blocks` and `save_weights(path)`.
    :param cfgpath: path of the original cfg file.
    :param weightpath: path of the original weights file ('.pt' is renamed
        to '.weights').
    :param percent: pruning ratio embedded in the output file names.
    """
    # Bug fix: the original used cfgpath.replace('/', ...), which mangled
    # every component of a multi-directory path and, for a bare filename,
    # added no prefix at all (overwriting the original file).
    cfg_dir, cfg_name = os.path.split(cfgpath)
    pruned_cfg_name = os.path.join(cfg_dir, f'prune_{percent}_{cfg_name}')
    write_cfg(pruned_cfg_name, model.blocks)
    print(f'Config file has been saved:{pruned_cfg_name}')

    weight_dir, weight_name = os.path.split(weightpath)
    compact_model_name = os.path.join(weight_dir, f'prune_{percent}_{weight_name}')
    if compact_model_name.endswith('.pt'):
        # Swap only the extension (replace() would hit the first '.pt'
        # occurring anywhere in the path).
        compact_model_name = compact_model_name[:-3] + '.weights'

    model.save_weights(compact_model_name)
    print(f'Compact model has been saved:{compact_model_name}')
