import torch
import torch.nn.functional as F

"""
三重损失,计算三个交叉熵损失,
在4个idea中:前两重为左右脑预测值,第三重为模型预测值
在PSN中:第一重为Patch预测值,第二重为Region预测值, 第三重为模型预测值
"""


class HybridLoss(torch.nn.Module):
    """Weighted sum of three cross-entropy losses over probability inputs.

    total = alpha_p * CE(P_p) + alpha_r * CE(P_r) + CE(P_s)

    Each CE term is computed manually from *probabilities* (not logits):
    -mean(sum(one_hot(y) * log(P + eps), dim=1)).

    :param alpha_p: weight for the patch-level (first) loss term.
    :param alpha_r: weight for the region-level (second) loss term.
    """

    # Small constant added before log() to avoid log(0) on zero probabilities.
    _EPS = 1e-5

    def __init__(self, alpha_p=1.0, alpha_r=1.0):
        super(HybridLoss, self).__init__()
        self.alpha_p = alpha_p
        self.alpha_r = alpha_r

    def forward(self, P_p, P_r, P_s, y_true):
        """
        :param P_p: Patch-level probability predictions, shape: [batch_size, num_classes]
        :param P_r: Region-level probability predictions, shape: [batch_size, num_classes]
        :param P_s: Subject-level probability predictions, shape: [batch_size, num_classes]
        :param y_true: True labels, shape: [batch_size]
        :return: scalar tensor with the combined loss.
        """
        # Infer the class count from the predictions instead of hard-coding 2,
        # so the loss also works for multi-class problems. Binary inputs are
        # unaffected (P_p.size(1) == 2 reproduces the old behavior).
        num_classes = P_p.size(1)
        y_one_hot = F.one_hot(y_true, num_classes=num_classes).float()

        def _ce(probs):
            # Manual cross-entropy on probabilities, mean over the batch.
            return -torch.mean(torch.sum(y_one_hot * torch.log(probs + self._EPS), dim=1))

        loss_p = _ce(P_p)  # Patch-level loss
        loss_r = _ce(P_r)  # Region-level loss
        loss_s = _ce(P_s)  # Subject-level loss (always weight 1)

        return self.alpha_p * loss_p + self.alpha_r * loss_r + loss_s


def hybrid_loss(patch_pred, region_pred, subject_pred, true_labels):
    """Unweighted sum of binary cross-entropy losses at the patch, region
    and subject level, each computed against the same ground-truth labels.

    All predictions and labels must be float tensors of matching shape,
    with predictions already in [0, 1] (probabilities, not logits).
    """
    level_losses = [
        F.binary_cross_entropy(pred, true_labels)
        for pred in (patch_pred, region_pred, subject_pred)
    ]
    return sum(level_losses)


"""
换个写法,没区别,测试两个写法是否输出一致
"""

import torch
import torch.nn as nn
import torch.nn.functional as F


class HybridCrossEntropyLoss(nn.Module):
    """Weighted sum of up to three cross-entropy terms (logit inputs).

    total = lambda_p * CE(patch) + lambda_r * CE(region) + CE(subject)

    Any prediction passed as ``None`` contributes 0 to the total, so the
    loss degrades gracefully when a level of the network is disabled.

    :param lambda_p: weight for the patch-level term.
    :param lambda_r: weight for the region-level term.
    """

    def __init__(self, lambda_p=1.0, lambda_r=1.0):
        super(HybridCrossEntropyLoss, self).__init__()
        self.lambda_p = lambda_p
        self.lambda_r = lambda_r

    def forward(self, patch_preds=None, region_preds=None, subject_preds=None, targets=None):
        """
        Arguments:
        patch_preds: Patch-level predictions (batch_size, num_classes)
        region_preds: Region-level predictions (batch_size, num_classes)
        subject_preds: Subject-level predictions (batch_size, num_classes)
        targets: Ground truth labels (batch_size,)
        """
        def _term(preds):
            # Missing predictions contribute 0, leaving the weighted sum intact.
            if preds is None:
                return 0
            return F.cross_entropy(preds, targets, reduction='mean')

        # Subject-level term always carries weight 1.
        return (self.lambda_p * _term(patch_preds)
                + self.lambda_r * _term(region_preds)
                + _term(subject_preds))

# Example usage:
# patch_preds, region_preds, subject_preds are the outputs from the respective networks
# targets are the ground truth labels for each input sample

