# -*- coding: utf-8 -*-
# ===========================================
# @Time    : 2021/9/14 9:36 
# @Author  : shutao
# @FileName: affinity_loss.py
# @remark  : 
# 
# @Software: PyCharm
# Github  : https://github.com/NameLacker
# ===========================================

import paddle
import paddle.nn as nn
import paddle.nn.functional as F

from ...utils import ele_sub
from .. import BaseNet


def _construction_ideal_affinity_map(gt_label, cls_size, n_classes):
    """Build the ideal pairwise affinity map A from a ground-truth label map.

    Two spatial positions are "affine" (A == 1) iff they carry the same
    class label after downsampling to the classifier's resolution.

    Args:
        gt_label: integer class-label tensor, presumably shaped (N, H, W)
            — the unsqueeze below expects no channel axis; TODO confirm.
        cls_size: target (height, width) to downsample the labels to.
        n_classes: number of classes C used for one-hot encoding.

    Returns:
        float32 tensor of shape (N, H*W, H*W) where entry (i, j) is 1 when
        positions i and j share the same label, else 0.
    """
    # Downsample labels to the prediction resolution; nearest keeps them
    # valid class ids. (F.upsample is deprecated in favor of F.interpolate.)
    L = F.interpolate(gt_label.unsqueeze(1), size=cls_size, mode='nearest')
    # One-hot encode: (N, h, w) -> (N, h, w, C) -> (N, C, h, w)
    L = F.one_hot(L.squeeze(1), n_classes).transpose((0, 3, 1, 2))
    # Flatten the spatial dims: (N, C, h*w)
    N, C, H, W = L.shape
    L = L.reshape((N, C, H * W)).astype(paddle.float32)
    # Pairwise same-class indicator via batched matmul:
    # (N, h*w, C) @ (N, C, h*w) -> (N, h*w, h*w)
    A = paddle.bmm(L.transpose((0, 2, 1)), L)
    return A


class AffinityLoss(BaseNet):
    """Affinity loss: a unary BCE term on the predicted affinity map plus a
    global term built from recall, specificity and precision ratios
    (CPNet-style context-prior supervision).

    Args:
        feature_size: side length of the (square) affinity feature map.
        n_classes: number of segmentation classes.
        reduction: default BCE reduction ('none' | 'mean' | 'sum').
        loss_weight: scalar multiplier applied to the final loss.
    """

    def __init__(self, feature_size, n_classes, reduction='mean', loss_weight=1.0):
        super().__init__()
        self.feature_size = [feature_size, feature_size]
        self.n_classes = n_classes
        self.reduction = reduction
        self.loss_weight = loss_weight

    def _ratio_bce(self, numerator, denominator, reduction):
        """BCE between a ratio (numerator/denominator) and an all-ones target.

        Shared by the recall / specificity / precision terms below.
        """
        # ele_sub(denominator, 0., 1.): presumably replaces zero entries
        # with 1 to avoid division by zero — TODO confirm against utils.ele_sub.
        denominator = ele_sub(denominator, 0., 1.)
        ratio = numerator.divide(denominator)
        target = paddle.ones_like(ratio)
        return F.binary_cross_entropy(ratio, target, reduction=reduction)

    def forward(self, cls_score, label, reduction_override=None):
        """Compute the affinity loss.

        Args:
            cls_score: predicted affinity probabilities in [0, 1],
                shaped (N, H*W, H*W) to match the ideal affinity map.
            label: ground-truth class-label map.
            reduction_override: optional reduction overriding self.reduction.

        Returns:
            loss_weight * (unary BCE + recall/specificity/precision BCEs).
        """
        assert reduction_override in (None, 'none', 'mean', 'sum')
        reduction = (
            reduction_override if reduction_override else self.reduction
        )
        # Build the ideal (binary) affinity map from the ground-truth labels.
        ideal_affinity_map = _construction_ideal_affinity_map(
            label, self.feature_size, self.n_classes)

        # Unary term: plain BCE between prediction and ideal map.
        unary_term = F.binary_cross_entropy(
            cls_score, ideal_affinity_map, reduction=reduction)

        # Mask out the diagonal (self-affinity) from the positive targets.
        diagonal_matrix = 1 - paddle.eye(ideal_affinity_map.shape[1])
        vtarget = diagonal_matrix * ideal_affinity_map

        # True-positive mass per row — shared numerator of recall & precision.
        true_positive = paddle.sum(cls_score * vtarget, axis=2)

        # Recall: TP / (off-diagonal positives).
        recall_loss = self._ratio_bce(
            true_positive, paddle.sum(vtarget, axis=2), reduction)

        # Specificity: TN / (negatives in the ideal map).
        spec_loss = self._ratio_bce(
            paddle.sum((1 - cls_score) * (1 - ideal_affinity_map), axis=2),
            paddle.sum(1 - ideal_affinity_map, axis=2), reduction)

        # Precision: TP / (all predicted mass).
        precision_loss = self._ratio_bce(
            true_positive, paddle.sum(cls_score, axis=2), reduction)

        global_term = recall_loss + spec_loss + precision_loss

        loss_cls = self.loss_weight * (unary_term + global_term)
        return loss_cls
