# -*- coding: utf-8 -*-

"""
Created on 03/20/2021
penalty.
@author: Kang Xiatao (kangxiatao@gmail.com)
"""

#
import tensorflow as tf
import math


class GoGoGoLoss(tf.keras.losses.Loss):
    """Categorical cross-entropy plus optional structured-sparsity penalties.

    Total loss = CE + L1 + L2 + angle-similarity penalty + group lasso.
    Every regularizer is applied only to convolution kernels (trainable
    variables whose name contains both 'conv' and 'kernel') and is enabled
    by the corresponding non-zero hyper-parameter on ``args``:
    ``l1_value``, ``l2_value``, ``var_1``/``var_2`` (+ ``penalty_ratio``),
    ``gl_1``/``gl_2``.
    """

    def __init__(self, model, args, name="gogogoloss"):
        super().__init__(name=name)
        self.model = model  # model whose conv kernels are regularized
        self.args = args    # hyper-parameter namespace (see class docstring)

    def _conv_kernels(self):
        """Yield the trainable conv-kernel variables of the wrapped model."""
        for p in self.model.trainable_variables:
            if 'conv' in p.name and 'kernel' in p.name:
                yield p

    def _l1_penalty(self):
        """Sum of absolute values over all conv kernels, scaled by l1_value."""
        terms = [tf.math.reduce_sum(tf.math.abs(p)) for p in self._conv_kernels()]
        if not terms:
            # tf.stack([]) raises; a model without conv kernels gets no penalty.
            return tf.constant(0.0)
        return self.args.l1_value * tf.reduce_sum(tf.stack(terms))

    def _l2_penalty(self):
        """Sum of tf.nn.l2_loss over all conv kernels, scaled by l2_value."""
        terms = [tf.nn.l2_loss(p) for p in self._conv_kernels()]
        if not terms:
            return tf.constant(0.0)
        return self.args.l2_value * tf.reduce_sum(tf.stack(terms))

    def _angle_penalty(self):
        """Angle-similarity penalty on the top-ranked channels and filters.

        For each conv kernel the channels/filters with the largest L2 norms
        (top ``penalty_ratio`` fraction) are compared against their mean
        vector; the penalty grows with the angular similarity, pushing the
        selected vectors apart. Assumes rank-4 kernels — TODO confirm the
        (h, w, in, out) layout against the model definition.
        """
        cos = tf.constant(0.0)
        pi = tf.constant(math.pi)
        for weight in self._conv_kernels():
            # Rank channels/filters by L2 norm; keep only the top penalty_ratio fraction.
            channel_norm = tf.sqrt(tf.reduce_sum(tf.square(weight), axis=(0, 1, 3)))
            filter_norm = tf.sqrt(tf.reduce_sum(tf.square(weight), axis=(0, 1, 2)))

            channel_norm_index = tf.argsort(channel_norm, direction='DESCENDING')
            channel_norm_index = channel_norm_index[:int(channel_norm_index.shape[0] * self.args.penalty_ratio)]
            channel_weight = tf.gather(weight, channel_norm_index, axis=2)
            filter_norm_index = tf.argsort(filter_norm, direction='DESCENDING')
            filter_norm_index = filter_norm_index[:int(filter_norm_index.shape[0] * self.args.penalty_ratio)]
            filter_weight = tf.gather(weight, filter_norm_index, axis=3)

            # Broadcast each mean vector to the same shape as the selected weights.
            one_weight = tf.ones_like(channel_weight)
            channel_mean = tf.reduce_mean(channel_weight, axis=(0, 1, 2))  # one mean per filter
            channel_mean = one_weight * channel_mean
            one_weight = tf.ones_like(filter_weight)
            filter_mean = tf.reduce_mean(filter_weight, axis=(0, 1, 3))  # one mean per channel
            # Transpose so broadcasting hits the channel axis, then restore layout.
            one_weight = tf.transpose(one_weight, (0, 1, 3, 2))
            filter_mean = one_weight * filter_mean
            filter_mean = tf.transpose(filter_mean, (0, 1, 3, 2))

            # Norms of the selected vectors and of the broadcast mean vectors.
            channel_norm = tf.sqrt(tf.reduce_sum(tf.square(channel_weight), axis=(0, 1, 3)))
            channel_mean_norm = tf.sqrt(tf.reduce_sum(tf.square(channel_mean), axis=(0, 1, 3)))
            filter_norm = tf.sqrt(tf.reduce_sum(tf.square(filter_weight), axis=(0, 1, 2)))
            filter_mean_norm = tf.sqrt(tf.reduce_sum(tf.square(filter_mean), axis=(0, 1, 2)))
            # Dot product of each vector with the mean vector.
            channel_dot_mean = tf.reduce_sum(channel_weight * channel_mean, axis=(0, 1, 3))
            filter_dot_mean = tf.reduce_sum(filter_weight * filter_mean, axis=(0, 1, 2))

            # Cosine of the angle to the mean vector (epsilon avoids div-by-zero).
            cos_channel = tf.divide(channel_dot_mean, channel_norm * channel_mean_norm + tf.constant(0.0000001))
            cos_filter = tf.divide(filter_dot_mean, filter_norm * filter_mean_norm + tf.constant(0.0000001))
            # Angular similarity mapped into [0, 1].
            similarity_channel = 1 - tf.divide(tf.acos(cos_channel), pi)
            similarity_filter = 1 - tf.divide(tf.acos(cos_filter), pi)

            cos += tf.reduce_sum(similarity_filter) * self.args.var_1
            cos += tf.reduce_sum(similarity_channel) * self.args.var_2
        return cos

    def _group_lasso_penalty(self):
        """Group lasso: gl_1 weights filter groups, gl_2 weights channel groups."""
        terms = []
        for weight in self._conv_kernels():
            t1 = tf.reduce_sum(tf.abs(weight), axis=[0, 1, 2])  # per-filter group L1
            t2 = tf.reduce_sum(tf.abs(weight), axis=[0, 1, 3])  # per-channel group L1
            t1 = tf.sqrt(tf.reduce_sum(t1 * t1))  # Euclidean norm across groups
            t2 = tf.sqrt(tf.reduce_sum(t2 * t2))
            terms.append(t1 * self.args.gl_1 + t2 * self.args.gl_2)
        if not terms:
            return tf.constant(0.0)
        return tf.reduce_sum(tf.stack(terms))

    def call(self, y_true, y_pred):
        """Return the per-sample CE loss plus every enabled penalty term.

        ``y_pred`` is expected to be logits (``from_logits=True``).
        """
        loss = tf.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)
        if self.args.l1_value != 0.0:
            loss = loss + self._l1_penalty()
        if self.args.l2_value != 0.0:
            loss = loss + self._l2_penalty()
        if self.args.var_1 != 0.0 or self.args.var_2 != 0.0:
            loss = loss + self._angle_penalty()
        if self.args.gl_1 != 0.0 or self.args.gl_2 != 0.0:
            loss = loss + self._group_lasso_penalty()
        return loss


class CrossentropyLoss(tf.keras.losses.Loss):
    """Plain categorical cross-entropy on logits.

    ``model`` and ``args`` are accepted (and stored) only to keep the
    constructor signature interchangeable with the penalized losses.
    """

    def __init__(self, model, args, name="crossentropyloss"):
        super().__init__(name=name)
        self.model = model
        self.args = args

    def call(self, y_true, y_pred):
        """Per-sample categorical cross-entropy; ``y_pred`` are logits."""
        return tf.losses.categorical_crossentropy(y_true, y_pred, from_logits=True)


class L1Loss(tf.keras.losses.Loss):
    """L1 penalty on conv kernels, scaled by ``args.l1_value``.

    Labels and predictions are ignored; the loss depends only on the
    model's conv-kernel variables. Returns 0 when the penalty is disabled.
    """

    def __init__(self, model, args, name="l1"):
        super().__init__(name=name)
        self.model = model
        self.args = args

    def call(self, y_true, y_pred):
        """Return l1_value * sum(|w|) over all conv kernels."""
        if self.args.l1_value == 0.0:
            return 0
        abs_sums = [
            tf.math.reduce_sum(tf.math.abs(var))
            for var in self.model.trainable_variables
            if 'conv' in var.name and 'kernel' in var.name
        ]
        return self.args.l1_value * tf.reduce_sum(tf.stack(abs_sums))


class L2Loss(tf.keras.losses.Loss):
    """L2 penalty on conv kernels, scaled by ``args.l2_value``.

    Labels and predictions are ignored; the loss depends only on the
    model's conv-kernel variables. Returns 0 when the penalty is disabled.
    """

    def __init__(self, model, args, name="l2"):
        super().__init__(name=name)
        self.model = model
        self.args = args

    def call(self, y_true, y_pred):
        """Return l2_value * sum(tf.nn.l2_loss(w)) over all conv kernels."""
        if self.args.l2_value == 0.0:
            return 0
        sq_sums = [
            tf.nn.l2_loss(var)
            for var in self.model.trainable_variables
            if 'conv' in var.name and 'kernel' in var.name
        ]
        return self.args.l2_value * tf.reduce_sum(tf.stack(sq_sums))


class SeparateAngleLoss(tf.keras.losses.Loss):
    """Standalone angle-similarity penalty on conv kernels.

    For each conv kernel, the channels/filters with the largest L2 norms
    (top ``args.penalty_ratio`` fraction) are compared against their mean
    vector via cosine similarity; the penalty grows as vectors align with
    the mean, weighted by ``args.var_1`` (filters) and ``args.var_2``
    (channels). Labels and predictions are ignored.
    """

    def __init__(self, model, args, name="separate_angle"):
        super().__init__(name=name)
        self.model = model  # model whose conv kernels are penalized
        self.args = args    # hyper-parameters: var_1, var_2, penalty_ratio

    def call(self, y_true, y_pred):
        """Return the summed angle-similarity penalty (0 if disabled)."""
        _var_reg = 0

        if self.args.var_1 != 0.0 or self.args.var_2 != 0.0:
            cos = tf.constant(0.0)
            pi = tf.constant(math.pi)
            for weight in self.model.trainable_variables:
                # Only convolution kernels are penalized.
                # NOTE(review): the transpose perms below imply rank-4 kernels,
                # presumably (h, w, in, out) — confirm against the model.
                if 'conv' in weight.name and 'kernel' in weight.name:
                    # Rank channels/filters by L2 norm, then keep the top x%.
                    channel_norm = tf.sqrt(tf.reduce_sum(tf.square(weight), axis=(0, 1, 3)))
                    filter_norm = tf.sqrt(tf.reduce_sum(tf.square(weight), axis=(0, 1, 2)))

                    channel_norm_index = tf.argsort(channel_norm, direction='DESCENDING')
                    channel_norm_index = channel_norm_index[:int(channel_norm_index.shape[0] * self.args.penalty_ratio)]  # top x%
                    channel_weight = tf.gather(weight, channel_norm_index, axis=2)
                    filter_norm_index = tf.argsort(filter_norm, direction='DESCENDING')
                    filter_norm_index = filter_norm_index[:int(filter_norm_index.shape[0] * self.args.penalty_ratio)]  # top x%
                    filter_weight = tf.gather(weight, filter_norm_index, axis=3)

                    # Build mean-vector tensors with the same shape as the selected weights.
                    one_weight = tf.ones_like(channel_weight)
                    channel_mean = tf.reduce_mean(channel_weight, axis=(0, 1, 2))  # channel means; one per filter
                    channel_mean = one_weight * channel_mean
                    one_weight = tf.ones_like(filter_weight)
                    filter_mean = tf.reduce_mean(filter_weight, axis=(0, 1, 3))  # filter means; one per channel
                    # Transpose so broadcasting lands on the channel axis, then restore layout.
                    one_weight = tf.transpose(one_weight, (0, 1, 3, 2))
                    filter_mean = one_weight * filter_mean
                    filter_mean = tf.transpose(filter_mean, (0, 1, 3, 2))

                    # Recompute the norms (vector magnitudes) of the selected and mean tensors.
                    channel_norm = tf.sqrt(tf.reduce_sum(tf.square(channel_weight), axis=(0, 1, 3)))  # vector magnitude
                    channel_mean_norm = tf.sqrt(tf.reduce_sum(tf.square(channel_mean), axis=(0, 1, 3)))
                    filter_norm = tf.sqrt(tf.reduce_sum(tf.square(filter_weight), axis=(0, 1, 2)))
                    filter_mean_norm = tf.sqrt(tf.reduce_sum(tf.square(filter_mean), axis=(0, 1, 2)))
                    # Dot product of each vector with its mean vector.
                    channel_dot_mean = tf.reduce_sum(channel_weight * channel_mean, axis=(0, 1, 3))
                    filter_dot_mean = tf.reduce_sum(filter_weight * filter_mean, axis=(0, 1, 2))

                    # Cosine of the angle between each vector and the mean vector
                    # (the small epsilon guards against division by zero).
                    cos_channel = tf.divide(channel_dot_mean, channel_norm * channel_mean_norm + tf.constant(0.0000001))
                    cos_filter = tf.divide(filter_dot_mean, filter_norm * filter_mean_norm + tf.constant(0.0000001))
                    # Angular similarity, mapped into the range [0, 1].
                    similarity_channel = 1 - tf.divide(tf.acos(cos_channel), pi)
                    similarity_filter = 1 - tf.divide(tf.acos(cos_filter), pi)

                    cos += tf.reduce_sum(similarity_filter) * self.args.var_1
                    cos += tf.reduce_sum(similarity_channel) * self.args.var_2
            _var_reg = cos

        return _var_reg


class GroupLassoLoss(tf.keras.losses.Loss):
    """Group-lasso penalty over conv kernels.

    ``args.gl_1`` weights the filter groups (reduction over axes 0,1,2) and
    ``args.gl_2`` the channel groups (reduction over axes 0,1,3). Labels and
    predictions are ignored. Returns 0 when both weights are zero.
    """

    def __init__(self, model, args, name="group_lasso"):
        super().__init__(name=name)
        self.model = model
        self.args = args

    def call(self, y_true, y_pred):
        """Return the summed group-lasso penalty over all conv kernels."""
        if self.args.gl_1 == 0.0 and self.args.gl_2 == 0.0:
            return 0
        per_layer = []
        for kernel in self.model.trainable_variables:
            if 'conv' not in kernel.name or 'kernel' not in kernel.name:
                continue
            # Within-group L1 (sum of |w|), then the Euclidean norm across groups.
            filter_groups = tf.reduce_sum(tf.abs(kernel), axis=[0, 1, 2])
            channel_groups = tf.reduce_sum(tf.abs(kernel), axis=[0, 1, 3])
            filter_term = tf.sqrt(tf.reduce_sum(filter_groups * filter_groups))
            channel_term = tf.sqrt(tf.reduce_sum(channel_groups * channel_groups))
            per_layer.append(filter_term * self.args.gl_1 + channel_term * self.args.gl_2)
        return tf.reduce_sum(tf.stack(per_layer))
