# -*- coding: utf-8 -*-

"""
Created on 10/29/2022
regul.
@author: Kang Xiatao (kangxiatao@gmail.com)
"""

import torch


# Based on https://github.com/tjuxiaofeng/Group-Sparse-DNN-for-AMC/blob/master/utils/regul.py#5
class Regularization(torch.nn.Module):
    """Weight-regularization penalties for a model.

    The penalty mode is selected by ``p``:
        1 -- L1 penalty: intentionally a no-op here (always returns 0).
        2 -- standard L2 penalty over every ``weight``/``bias`` parameter:
             0.5 * weight_decay * sum(w ** 2).
        3 -- L2 penalty where the high-value overlapping part (``coin_mark``)
             is exempted and the high-value non-overlapping part
             (``miss_mark``) is penalized 4x.
        4 -- like 3, but the overlapping part keeps a 0.2x penalty
             (only 0.8x is subtracted) instead of being fully exempted.
        5 -- squared-distance penalty towards a frozen snapshot of a
             reference model's parameters.
    """

    def __init__(self, weight_decay, p=2, coin_mark=None, miss_mark=None, linde=None):
        """
        :param weight_decay: regularization coefficient.
        :param p: penalty mode (see class docstring).
        :param coin_mark: for p=3/4, masks (indexed by the keys in ``linde``)
            selecting the high-value overlapping part of each weight tensor;
            for p=5, the reference model whose parameters are snapshotted.
        :param miss_mark: for p=3/4, masks selecting the high-value
            non-overlapping part of each weight tensor.
        :param linde: sequence of layer keys; ``str(key)`` is matched against
            parameter names to pair weights with their masks (p=3/4 only).
        """
        super(Regularization, self).__init__()
        self.weight_decay = weight_decay
        self.p = p
        self.coin_m = coin_mark
        self.miss_m = miss_mark
        self.it = linde

        # p == 5: snapshot the reference model's parameters once.
        # detach().clone() so the penalty can never backpropagate into the
        # reference model, and so later updates to it cannot move the anchor.
        if self.p == 5:
            self.wl = []
            for name, param in coin_mark.named_parameters():
                if 'weight' in name or 'bias' in name:
                    self.wl.append(param.detach().clone())

    def forward(self, model):
        """Return the regularization loss for ``model``.

        :param model: the ``torch.nn.Module`` being trained.
        :return: a scalar tensor (or the int 0 for p=1 / unknown modes).
        """
        reg_loss = 0

        if self.p == 1:
            # L1 mode is deliberately not applied in this implementation.
            return 0
        elif self.p == 2:
            # Plain L2 over every weight/bias tensor.
            for name, param in model.named_parameters():
                if 'weight' in name or 'bias' in name:
                    reg_loss += torch.sum(param ** 2)
            return 0.5 * self.weight_decay * reg_loss

        # - Penalize distance to the snapshotted reference parameters.
        elif self.p == 5:
            _cnt = 0
            for name, param in model.named_parameters():
                if 'weight' in name or 'bias' in name:
                    if param.size() == self.wl[_cnt].size():
                        # Pull the parameter towards its reference value.
                        reg_loss += torch.sum((param - self.wl[_cnt]) ** 2)
                    else:
                        # Shape changed (e.g. pruned/resized layer): fall back
                        # to plain L2 on this tensor.
                        reg_loss += torch.sum(param ** 2)
                    _cnt += 1
            return 0.5 * self.weight_decay * reg_loss

        # - Heavier penalty on the high-value non-overlapping part;
        #   the high-value overlapping part is not penalized at all.
        elif self.p == 3:
            # Base L2 term over everything.
            for name, param in model.named_parameters():
                if 'weight' in name or 'bias' in name:
                    reg_loss += torch.sum(param ** 2) * self.weight_decay
            _cnt = 0
            for name, param in model.named_parameters():
                # Match each masked layer by its key appearing in the name.
                if 'weight' in name and str(self.it[_cnt]) in name:
                    # Remove the overlapping part's penalty entirely ...
                    reg_loss -= torch.sum((param * self.coin_m[self.it[_cnt]]) ** 2) * self.weight_decay
                    # ... and penalize the non-overlapping part 4x on top of
                    # the base term (net 5x before the final 0.5 factor).
                    reg_loss += torch.sum((param * self.miss_m[self.it[_cnt]]) ** 2) * self.weight_decay * 4
                    _cnt += 1
                    if _cnt == len(self.it): break
            return 0.5 * reg_loss

        # - Heavier penalty on the high-value non-overlapping part;
        #   the high-value overlapping part keeps a 0.2x penalty.
        elif self.p == 4:
            for name, param in model.named_parameters():
                if 'weight' in name or 'bias' in name:
                    reg_loss += torch.sum(param ** 2) * self.weight_decay
            _cnt = 0
            for name, param in model.named_parameters():
                if 'weight' in name and str(self.it[_cnt]) in name:
                    # Subtract only 0.8x, leaving a 0.2x residual penalty.
                    reg_loss -= torch.sum((param * self.coin_m[self.it[_cnt]]) ** 2) * self.weight_decay * 0.8
                    reg_loss += torch.sum((param * self.miss_m[self.it[_cnt]]) ** 2) * self.weight_decay * 4
                    _cnt += 1
                    if _cnt == len(self.it): break
            return 0.5 * reg_loss

        # Unknown mode: no penalty.
        return 0

    def get_weight(self, model):
        """Collect the model's weight parameters.

        :param model: a ``torch.nn.Module``.
        :return: list of ``(name, parameter)`` pairs whose name contains
            ``'weight'``.
        """
        weight_list = []
        for name, param in model.named_parameters():
            if 'weight' in name:
                weight_list.append((name, param))
        return weight_list

    def regularization_loss(self, weight_list, weight_decay, p=2):
        """Compute a simple Lp regularization loss.

        :param weight_list: list of ``(name, tensor)`` pairs.
        :param weight_decay: regularization coefficient.
        :param p: 1 for L1 (sum of absolute values), 2 for L2 (sum of
            squares); any other value contributes 0 per tensor.
        :return: ``weight_decay`` times the accumulated penalty.
        """
        reg_loss = 0
        for name, w in weight_list:
            l_reg1 = 0
            if p == 1:
                l_reg1 = torch.sum(torch.abs(w))
            elif p == 2:
                l_reg1 = torch.sum(w ** 2)
            reg_loss = reg_loss + l_reg1

        return weight_decay * reg_loss

    def weight_info(self, weight_list):
        """Print the names of the regularized weights.

        :param weight_list: list of ``(name, tensor)`` pairs.
        """
        print("---------------regularization weight---------------")
        for name, w in weight_list:
            print(name)
        print("---------------------------------------------------")