
'''
    对网络模型进行压缩部署
    将Batch Normalization层融入卷积层
'''

import os
import copy
import torch
import torch.nn as nn
from utils.utils import get_classes, makedir
from nets.fused_mobilenet_v3 import mobilenet_v3



#------------------------------------------------------#
#   light_out_path        folder where the lightweight (BN-fused) weights are saved
#------------------------------------------------------#
light_out_path = "net_light"


class lightModel(object):
    """Compress a trained MobileNetV3 for deployment by folding every
    BatchNorm2d layer into the Conv2d layer that precedes it.

    Folding uses the standard identities (per output channel):
        W' = W * gamma / sqrt(var + eps)
        b' = beta + (b - mean) * gamma / sqrt(var + eps)
    so the fused convolution reproduces Conv -> BN exactly in eval mode.
    """

    _defaults = {
        #--------------------------------------------------------------------------#
        #   model_path points to the weight file under the logs folder,
        #   classes_path points to the class-list txt under model_data
        #--------------------------------------------------------------------------#
        "model_path"   : 'logs/loss_acc_lr_2024_01_22_11_17_06_fused2_Eca_spa1/best_epoch_weights.pth',
        "classes_path" : 'model_data/news_classes.txt',
    }

    #---------------------------------------------------#
    #   Initialize lightModel
    #---------------------------------------------------#
    def __init__(self, **kwargs):
        self.__dict__.update(self._defaults)  # expose the _defaults entries as instance attributes
        for name, value in kwargs.items():
            setattr(self, name, value)  # keyword overrides take precedence over the defaults

        #---------------------------------------------------#
        #   Load the class names and their count
        #---------------------------------------------------#
        self.class_names, self.num_classes = get_classes(self.classes_path)
        self.generate()
        makedir(light_out_path)

    #---------------------------------------------------#
    #   Build the network and load the trained weights
    #---------------------------------------------------#
    def generate(self):
        self.model = mobilenet_v3(pretrained=False, mode='small', num_classes=self.num_classes)
        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
        self.model_parameters = torch.load(self.model_path, map_location=device)
        self.model.load_state_dict(self.model_parameters)

    #--------------------------------------------#
    #   Fold BatchNorm layers into Conv layers
    #--------------------------------------------#
    def fuse_conv_bn(self):
        """Fuse each Conv2d + BatchNorm2d pair of self.model and save the
        fused state dict to <light_out_path>/best_epoch_weights.pth.

        Pairing assumes named_modules() yields each BatchNorm2d directly
        after the Conv2d it normalizes (true for the Sequential blocks of
        this MobileNetV3); a BN without a directly preceding conv is
        skipped instead of being fused against a stale convolution.
        """
        local_parameter = copy.deepcopy(self.model_parameters)
        #------------------------------------------------------------------------------#
        #   model.named_modules() returns a generator that iterates every module and
        #   submodule together with its qualified name, in registration order.
        #------------------------------------------------------------------------------#
        conv_name, kernel, conv_bias = None, None, None
        # no_grad keeps the fused tensors out of the autograd graph so the
        # saved state dict contains plain tensors without grad history.
        with torch.no_grad():
            for model_name, module in self.model.named_modules():
                if isinstance(module, nn.Conv2d):
                    #--------------------------------------------#
                    #   Remember the parameters of the convolution
                    #--------------------------------------------#
                    conv_name = model_name
                    kernel = module.weight
                    conv_bias = module.bias  # None when the conv was built with bias=False

                elif isinstance(module, nn.BatchNorm2d):
                    if conv_name is None:
                        # No convolution is pending: this BN cannot be folded here.
                        continue
                    bn_name = model_name
                    #--------------------------------------------#
                    #   Read the BN statistics and affine parameters
                    #--------------------------------------------#
                    running_mean = module.running_mean
                    running_var = module.running_var
                    gamma = module.weight
                    beta = module.bias
                    eps = module.eps
                    std = (running_var + eps).sqrt()

                    #--------------------------------------------#
                    #   Fuse conv and BN into new conv parameters
                    #--------------------------------------------#
                    t = (gamma / std).reshape(-1, 1, 1, 1)  # [ch] -> [ch, 1, 1, 1]
                    fused_kernel = kernel * t
                    if conv_bias is None:
                        fused_bias = beta - running_mean * gamma / std
                    else:
                        # A pre-existing conv bias must be folded in as well.
                        fused_bias = beta + (conv_bias - running_mean) * gamma / std

                    #--------------------------------------------#
                    #   Overwrite the conv entries of the state dict
                    #--------------------------------------------#
                    local_parameter[conv_name + '.weight'] = fused_kernel.detach().clone()
                    local_parameter[conv_name + '.bias'] = fused_bias.detach().clone()

                    #--------------------------------------------#
                    #   Drop the now-redundant BN entries
                    #--------------------------------------------#
                    del local_parameter[bn_name + '.weight']
                    del local_parameter[bn_name + '.bias']
                    del local_parameter[bn_name + '.running_mean']
                    del local_parameter[bn_name + '.running_var']
                    del local_parameter[bn_name + '.num_batches_tracked']

                    # One BN consumes one conv; reset so a later BN cannot
                    # accidentally fuse with the same convolution again.
                    conv_name, kernel, conv_bias = None, None, None

        #--------------------------------------------#
        #   Save the fused parameters
        #--------------------------------------------#
        torch.save(local_parameter, os.path.join(light_out_path, "best_epoch_weights.pth"))
        print('fuse conv bn do!')

if __name__ == "__main__":
    # Fuse the Conv+BN pairs of the trained checkpoint and write the
    # compressed weights for deployment.
    compressor = lightModel()
    compressor.fuse_conv_bn()


