"""
Created by Kostas Triaridis (@kostino)
in August 2023 @ ITI-CERTH
"""
import numpy as np
import torch
import torch.nn.functional as F
from torch import Tensor

from a2s_utils.a2s.util import tran_param, transform


class DiceLoss(torch.nn.Module):
    """Soft Dice loss on the foreground (class-1) channel of 2-class logits.

    Pixels whose target value equals ``ignore_index`` are zeroed out of both
    the prediction and the target, so they contribute nothing to either the
    intersection or the union term.
    """

    def __init__(self, ignore_index=-1):
        super().__init__()
        self.eps = 1e-9  # guards against division by zero when a mask is empty
        self.ignore_index = ignore_index

    def forward(self, logits: Tensor, target: Tensor) -> Tensor:
        """Return ``1 - mean(soft Dice)`` over the batch.

        Args:
            logits: ``(N, 2, H, W)`` raw class scores.
            target: ``(N, H, W)`` integer mask; ``ignore_index`` marks pixels
                to exclude.
        """
        bs, c, h, w = logits.size()
        # An empty batch would make the final mean() NaN; short-circuit instead.
        if bs == 0:
            return torch.tensor(0.).to(logits.device)

        # Probability of the foreground class.
        pred = torch.softmax(logits, dim=1)[:, 1, :, :]
        not_ignored_mask = target != self.ignore_index
        true = target * not_ignored_mask
        pred = pred * not_ignored_mask

        intersection = (pred * true).sum(dim=(-1, -2))
        union = (pred + true).sum(dim=(-1, -2))

        dice_losses = 2. * intersection / (union + self.eps)
        dice_loss_batch = dice_losses.mean()

        return 1 - dice_loss_batch

class BtmLoss(torch.nn.Module):
    """Boundary-aware Texture Matching (BTM) loss.

    Encourages the predicted saliency map to agree with local image texture
    along predicted object boundaries: a Gaussian colour-affinity kernel is
    compared against saliency differences within a local window, and the
    mismatch is averaged over a soft boundary band.
    """

    def __init__(self, ignore_index=-1):
        super().__init__()
        self.ignore_index = ignore_index

    def get_contour(self, label):
        """Return a soft boundary band: dilation minus erosion of label > 0.5."""
        lbl = label.gt(0.5).float()
        # Erosion implemented as a min-pool via 1 - maxpool(1 - x).
        ero = 1 - F.max_pool2d(1 - lbl, kernel_size=5, stride=1, padding=2)
        dil = F.max_pool2d(lbl, kernel_size=5, stride=1, padding=2)  # dilation

        edge = dil - ero
        return edge

    def forward(self, pred, image, gt, radius=5, config=None):
        """Compute the BTM loss.

        Args:
            pred: ``(N, 1, H, W)`` saliency/probability map.
            image: list of ``(N, 3, H', W')`` modality tensors, concatenated
                on the channel axis. With ``modal == 'c'`` only the first
                (RGB) modality's affinity is used.
            gt: ground-truth mask; currently unused in this implementation.
            radius: half-size of the local comparison window.
            config: currently unused; kept for interface compatibility.
        """
        alpha = 200  # weight on squared colour differences in the affinity kernel
        image = torch.cat(image, dim=1)
        # Hard-coded single-modality setting; 'c' == colour (RGB) only.
        modal = 'c'
        num_modal = len(modal) if 'c' in modal else len(modal) + 1
        # Channel offsets of each 3-channel modality inside `features`.
        slices = range(0, 3 * num_modal + 1, 3)
        sal_map = F.interpolate(pred, scale_factor=0.25, mode='bilinear', align_corners=True)
        image_ = F.interpolate(image, size=sal_map.shape[-2:], mode='bilinear', align_corners=True)
        mask = self.get_contour(sal_map)
        features = torch.cat([image_, sal_map], dim=1)

        N, C, H, W = features.shape
        diameter = 2 * radius + 1
        # Local windows, centred-difference form: each window entry minus its centre pixel.
        kernels = F.unfold(features, diameter, 1, radius).view(N, C, diameter, diameter, H, W)
        kernels = kernels - kernels[:, :, radius, radius, :, :].view(N, C, 1, 1, H, W)
        dis_modal = 1
        for idx, start in enumerate(slices):
            if idx == len(slices) - 1:
                continue
            # Gaussian affinity over this modality's 3 channels.
            dis_map = (-alpha * kernels[:, start:slices[idx + 1]] ** 2).sum(dim=1, keepdim=True).exp()
            # Restrict affinity to the RGB modality: later modalities contribute
            # a constant 1. Unreachable while modal == 'c' (single modality).
            if True and idx > 0:
                dis_map = dis_map * 0 + 1
            dis_modal = dis_modal * dis_map

        # Absolute saliency difference within each window.
        dis_sal = torch.abs(kernels[:, slices[-1]:])
        distance = dis_modal * dis_sal

        loss = distance.view(N, 1, (radius * 2 + 1) ** 2, H, W).sum(dim=2)
        # Average over the boundary band only; epsilon avoids 0/0 on empty bands.
        loss = torch.sum(loss * mask) / (torch.sum(mask) + 1e-7)
        return loss

class CSDLoss(torch.nn.Module):
    """Confidence-aware Saliency Distilling loss.

    Pushes predictions away from 0.5 (i.e. towards confident values); the
    exponent decays with the training epoch so early epochs penalize
    uncertainty more softly.
    """

    def __init__(self, ignore_index=-1):
        # ``ignore_index`` is accepted for API symmetry with the other
        # losses in this file but is not used.
        super().__init__()

    def forward(self, pred, epoch=1):
        # Exponent shrinks from 2 towards 1 as epochs progress.
        exponent = 2 ** (1 - (epoch - 1) / 100)
        confidence = (pred - 0.5).abs().pow(exponent)

        # The 0.5**exponent offset keeps the loss non-negative; being a
        # constant, it has no effect on gradients or training dynamics.
        return pow(0.5, exponent) - confidence.mean()


def IOU(pred, target):
    """Soft IoU loss averaged over the batch: 1 - |pred*target| / |pred+target-pred*target|."""
    overlap = pred * target
    combined = pred + target - overlap
    per_sample = 1 - torch.sum(overlap, dim=(1, 2, 3)) / (torch.sum(combined, dim=(1, 2, 3)) + 1e-7)
    return per_sample.mean()
class TruForLoss(torch.nn.Module):
    """Phase-1 localization loss: weighted cross-entropy + Dice + BTM.

    Total = lambda_ce * CE + (1 - lambda_ce - 0.1) * Dice + 0.1 * BTM.
    """

    def __init__(self, lambda_ce: float = 0.3, ignore_index: int = -1, device='cuda:1', weights=torch.tensor([0.5, 2.5])):
        # NOTE(review): ``device`` is unused; ``weights`` defaults to a tensor
        # created once at class-definition time (shared across instances, but
        # never mutated here). Both kept for signature compatibility.
        super().__init__()
        self.lambda_ce = lambda_ce
        self.ignore_index = ignore_index
        # Class weights down-weight authentic (0) vs manipulated (1) pixels.
        self.criterion_bce = torch.nn.CrossEntropyLoss(weight=weights, ignore_index=self.ignore_index)
        self.criterion_dice = DiceLoss(ignore_index=self.ignore_index)
        self.btm = BtmLoss(ignore_index=self.ignore_index)

    def forward(self, logits: Tensor, target: Tensor, image, epoch):
        """Combine the three loss terms.

        Args:
            logits: ``(N, 2, H, W)`` class scores.
            target: ``(N, H, W)`` integer mask.
            image: sequence of image tensors; only ``image[0:1]`` feeds BTM.
            epoch: unused here; kept for interface compatibility.
        """
        loss_bce = self.criterion_bce(logits, target)
        loss_dice = self.criterion_dice(logits, target)

        # Probability of the manipulated class, kept as (N, 1, H, W).
        p = torch.nn.functional.softmax(logits, dim=1)[:, 1:2, :, :]
        loss_btm = self.btm(p, image[0:1], target.unsqueeze(1).float())
        loss = self.lambda_ce * loss_bce + (1 - self.lambda_ce - 0.1) * loss_dice + 0.1 * loss_btm

        return loss

class TruForLoss_sal(torch.nn.Module):
    """Saliency-phase loss: soft IoU on the class-1 probability plus Dice.

    Total = lambda_ce * IoU + (1 - lambda_ce) * Dice.
    """

    def __init__(self, lambda_ce: float = 0.2, ignore_index: int = -1, device='cuda:1', weights=torch.tensor([0.5, 2.5])):
        # ``device`` and ``weights`` are unused; kept for signature
        # compatibility with TruForLoss.
        super().__init__()
        self.lambda_ce = lambda_ce
        self.ignore_index = ignore_index
        self.criterion_dice = DiceLoss(ignore_index=self.ignore_index)

    def forward(self, logits: Tensor, target: Tensor, epoch):
        """Mix IoU and Dice losses; ``epoch`` is unused (interface compat)."""
        # Probability of the manipulated class, shape (N, 1, H, W).
        p = torch.nn.functional.softmax(logits, dim=1)[:, 1:2, :, :]
        loss_dice = self.criterion_dice(logits, target)
        # NOTE(review): if ``target`` is (N, H, W), IOU(p, target) broadcasts
        # (N,1,H,W) against (N,H,W) to (N,N,H,W) — possibly unintended;
        # confirm the caller's target shape.
        loss_iou = IOU(p, target)
        loss = self.lambda_ce * loss_iou + (1 - self.lambda_ce) * loss_dice

        return loss
class TruForLossPhase2(torch.nn.Module):
    """Phase-2 loss: per-pixel confidence regression plus image-level detection.

    The confidence head is regressed (MSE) towards the probability the
    anomaly head assigned to the *correct* class at each pixel; the
    detection head gets a BCE-with-logits term weighted by ``lambda_det``.
    """

    def __init__(self, lambda_det: float = 0.5, ignore_index: int = -1):
        super().__init__()
        self.lambda_det = lambda_det
        self.ignore_index = ignore_index
        self.criterion_detect = torch.nn.BCEWithLogitsLoss()
        # reduction='none' so ignored pixels can be masked out before averaging.
        self.criterion_conf = torch.nn.MSELoss(reduction='none')

    def forward(self, anomaly: Tensor, gt_mask: Tensor, conf: Tensor, detect: Tensor, label: Tensor) -> Tensor:
        prob_fake = torch.softmax(anomaly, dim=1)[:, 1, :, :]
        # Probability the anomaly head gave to the true class of each pixel.
        correctness = gt_mask * prob_fake + (1 - gt_mask) * (1 - prob_fake)

        # Average the MSE only over pixels not marked with ignore_index.
        keep = gt_mask != self.ignore_index
        conf_loss = self.criterion_conf(conf.squeeze(1), correctness)[keep].mean()

        det_loss = self.criterion_detect(detect.squeeze(1), label.to(torch.float32))

        return conf_loss + det_loss * self.lambda_det
