import numpy as np
import torch
import torch.nn.functional as F


def multi_class_iou_score(output, target, num_classes, ignore_index=None):
    """Compute the mean Intersection-over-Union across classes.

    Args:
        output: model logits of shape (batch_size, num_classes, H, W).
        target: labels, either (batch_size, H, W) class indices or
            (batch_size, num_classes, H, W) one-hot maps.
        num_classes: number of classes.
        ignore_index: optional class index excluded from scoring; pixels
            with this label are also excluded from every class's counts.

    Returns:
        Mean IoU over classes that occur in prediction or target
        (0.0 when no class occurs at all).
    """
    eps = 1e-5

    # Collapse one-hot targets down to an index map.
    if target.dim() == 4:
        target = torch.argmax(target, dim=1)

    # Hard class predictions from the logits.
    if output.size(1) == 1:  # single-channel binary head
        output = (torch.sigmoid(output) > 0.5).long().squeeze(1)
    else:  # multi-class head
        output = torch.argmax(torch.softmax(output, dim=1), dim=1)

    # Pixels labelled ignore_index are excluded from all classes.
    keep = None if ignore_index is None else (target != ignore_index)

    scores = []
    for cls in range(num_classes):
        if ignore_index is not None and cls == ignore_index:
            continue

        pred_mask = output == cls
        true_mask = target == cls
        if keep is not None:
            pred_mask = pred_mask & keep
            true_mask = true_mask & keep

        inter = (pred_mask & true_mask).float().sum()
        union = (pred_mask | true_mask).float().sum()

        # A class absent from both prediction and target contributes nothing.
        if union == 0:
            continue
        scores.append(((inter + eps) / (union + eps)).item())

    if not scores:
        return 0.0
    return sum(scores) / len(scores)


def multi_class_dice_score(output, target, num_classes, ignore_index=None):
    """Compute the mean soft Dice coefficient across classes.

    Fixes vs. the previous version:
      * ``F.one_hot`` no longer crashes when labels contain an
        ``ignore_index`` outside ``[0, num_classes)`` (e.g. 255), and
      * pixels labelled ``ignore_index`` are excluded from every class's
        sums, consistent with ``multi_class_iou_score``.

    Args:
        output: model logits of shape (batch_size, num_classes, H, W).
        target: labels, either (batch_size, H, W) class indices or
            (batch_size, num_classes, H, W) one-hot/soft maps.
        num_classes: number of classes.
        ignore_index: optional class index excluded from scoring.

    Returns:
        Mean Dice over classes present in prediction or target, or 0.0
        if no class contributes.
    """
    smooth = 1e-5

    # Build the one-hot target plus a validity mask for ignored pixels.
    if target.dim() == 3:
        idx = target.long()
        if ignore_index is not None:
            valid = idx != ignore_index
            # Remap ignored labels to 0 so one_hot stays in range; those
            # positions are zeroed out by the mask below anyway.
            idx = torch.where(valid, idx, torch.zeros_like(idx))
        else:
            valid = None
        target_onehot = F.one_hot(idx, num_classes).permute(0, 3, 1, 2).float()
    else:
        # Already channel-encoded; derive the mask from the argmax labels.
        target_onehot = target
        valid = None
        if ignore_index is not None:
            valid = torch.argmax(target, dim=1) != ignore_index

    # Per-class probabilities.
    if output.size(1) > 1:  # multi-class head
        output_probs = torch.softmax(output, dim=1)
    else:  # binary head: stack background/foreground probabilities
        output_probs = torch.sigmoid(output)
        output_probs = torch.cat([1 - output_probs, output_probs], dim=1)

    mask = valid.float() if valid is not None else None

    dice_per_class = []
    for class_idx in range(num_classes):
        if ignore_index is not None and class_idx == ignore_index:
            continue

        output_class = output_probs[:, class_idx, :, :]
        target_class = target_onehot[:, class_idx, :, :]
        if mask is not None:
            output_class = output_class * mask
            target_class = target_class * mask

        intersection = (output_class * target_class).sum()
        union = output_class.sum() + target_class.sum()

        # A class absent from both prediction and target contributes nothing.
        if union == 0:
            continue
        dice = (2. * intersection + smooth) / (union + smooth)
        dice_per_class.append(dice.item())

    if not dice_per_class:
        return 0.0
    return sum(dice_per_class) / len(dice_per_class)


def pixel_accuracy(output, target):
    """Fraction of pixels whose predicted class matches the label.

    Args:
        output: model logits of shape (batch_size, num_classes, H, W).
        target: labels, either (batch_size, H, W) class indices or
            (batch_size, num_classes, H, W) one-hot maps.

    Returns:
        Pixel accuracy as a Python float in [0, 1].
    """
    # Collapse one-hot targets down to an index map.
    if target.dim() == 4:
        target = torch.argmax(target, dim=1)

    # Hard class predictions from the logits.
    if output.size(1) == 1:  # single-channel binary head
        preds = (torch.sigmoid(output) > 0.5).long().squeeze(1)
    else:  # multi-class head
        preds = torch.argmax(torch.softmax(output, dim=1), dim=1)

    matches = (preds == target).float()
    return (matches.sum() / matches.numel()).item()


def per_class_iou(output, target, num_classes, ignore_index=None):
    """Compute the IoU score for each class.

    Fix vs. the previous version: pixels labelled ``ignore_index`` are now
    excluded from every class's intersection/union, consistent with
    ``multi_class_iou_score``. Previously a prediction at an ignored pixel
    counted as a false positive for its class.

    Args:
        output: model logits of shape (batch_size, num_classes, H, W).
        target: labels of shape (batch_size, H, W).
        num_classes: number of classes.
        ignore_index: optional class index; its slot in the result is NaN.

    Returns:
        List of length ``num_classes``; NaN for the ignored class and for
        classes absent from both prediction and target.
    """
    smooth = 1e-5

    # Hard class predictions from the logits.
    if output.size(1) > 1:  # multi-class head
        output = torch.softmax(output, dim=1)
        output = torch.argmax(output, dim=1)
    else:  # binary head
        output = torch.sigmoid(output) > 0.5
        output = output.long().squeeze(1)

    # Pixels labelled ignore_index are excluded from all classes.
    valid_mask = (target != ignore_index) if ignore_index is not None else None

    iou_per_class = []
    for class_idx in range(num_classes):
        if ignore_index is not None and class_idx == ignore_index:
            iou_per_class.append(float('nan'))
            continue

        output_class = (output == class_idx)
        target_class = (target == class_idx)
        if valid_mask is not None:
            output_class = output_class & valid_mask
            target_class = target_class & valid_mask

        intersection = (output_class & target_class).float().sum()
        union = (output_class | target_class).float().sum()

        if union == 0:
            # Class absent everywhere: undefined rather than 0 or 1.
            iou_per_class.append(float('nan'))
        else:
            iou = (intersection + smooth) / (union + smooth)
            iou_per_class.append(iou.item())

    return iou_per_class


def per_class_dice(output, target, num_classes, ignore_index=None):
    """Compute the soft Dice coefficient for each class.

    Fixes vs. the previous version:
      * ``F.one_hot`` no longer crashes when labels contain an
        ``ignore_index`` outside ``[0, num_classes)`` (e.g. 255), and
      * pixels labelled ``ignore_index`` are excluded from every class's
        sums instead of contributing to the denominators.

    Args:
        output: model logits of shape (batch_size, num_classes, H, W).
        target: labels of shape (batch_size, H, W).
        num_classes: number of classes.
        ignore_index: optional class index; its slot in the result is NaN.

    Returns:
        List of length ``num_classes``; NaN for the ignored class and for
        classes absent from both prediction and target.
    """
    smooth = 1e-5

    # Build the one-hot target plus a validity mask for ignored pixels.
    idx = target.long()
    if ignore_index is not None:
        valid = idx != ignore_index
        # Remap ignored labels to 0 so one_hot stays in range; those
        # positions are zeroed out by the mask below anyway.
        idx = torch.where(valid, idx, torch.zeros_like(idx))
        mask = valid.float()
    else:
        mask = None
    target_onehot = F.one_hot(idx, num_classes).permute(0, 3, 1, 2).float()

    # Per-class probabilities.
    if output.size(1) > 1:  # multi-class head
        output_probs = torch.softmax(output, dim=1)
    else:  # binary head: stack background/foreground probabilities
        output_probs = torch.sigmoid(output)
        output_probs = torch.cat([1 - output_probs, output_probs], dim=1)

    dice_per_class = []
    for class_idx in range(num_classes):
        if ignore_index is not None and class_idx == ignore_index:
            dice_per_class.append(float('nan'))
            continue

        output_class = output_probs[:, class_idx, :, :]
        target_class = target_onehot[:, class_idx, :, :]
        if mask is not None:
            output_class = output_class * mask
            target_class = target_class * mask

        intersection = (output_class * target_class).sum()
        union = output_class.sum() + target_class.sum()

        if union == 0:
            # Class absent everywhere: undefined rather than 0 or 1.
            dice_per_class.append(float('nan'))
        else:
            dice = (2. * intersection + smooth) / (union + smooth)
            dice_per_class.append(dice.item())

    return dice_per_class


def confusion_matrix(output, target, num_classes):
    """Compute the confusion matrix.

    ``cm[i, j]`` counts pixels predicted as class ``i`` whose true label is
    class ``j`` (same layout as the previous implementation). Computed with
    a single ``torch.bincount`` pass instead of ``num_classes**2`` full
    scans of the tensors.

    Args:
        output: model logits of shape (batch_size, num_classes, H, W).
        target: labels of shape (batch_size, H, W); values are assumed to
            lie in ``[0, num_classes)`` — out-of-range labels (e.g. an
            ignore value) are not supported, matching the original contract.
        num_classes: number of classes.

    Returns:
        CPU LongTensor of shape (num_classes, num_classes).
    """
    # Hard class predictions from the logits.
    if output.size(1) > 1:  # multi-class head
        preds = torch.argmax(torch.softmax(output, dim=1), dim=1)
    else:  # binary head
        preds = (torch.sigmoid(output) > 0.5).long().squeeze(1)

    # Encode each (pred, target) pair as a single index, then histogram.
    combined = preds.flatten() * num_classes + target.flatten().long()
    cm = torch.bincount(combined, minlength=num_classes * num_classes)
    return cm.reshape(num_classes, num_classes).cpu()


def precision_recall_f1(output, target, num_classes, ignore_index=None):
    """Compute per-class precision, recall and F1 score.

    Bug fix: ``precision`` and ``recall`` were already converted to Python
    floats via ``.item()``, so ``f1`` was a plain float and the subsequent
    ``f1.item()`` raised ``AttributeError`` whenever both were defined.
    The counts are now extracted as floats once and F1 stays a float.

    Args:
        output: model logits of shape (batch_size, num_classes, H, W).
        target: labels of shape (batch_size, H, W).
        num_classes: number of classes.
        ignore_index: optional class index; its slots in the results are NaN.

    Returns:
        Three lists (precision, recall, f1), each of length ``num_classes``;
        NaN marks the ignored class or an undefined metric (e.g. precision
        with no positive predictions).
    """
    smooth = 1e-5

    # Hard class predictions from the logits.
    if output.size(1) > 1:  # multi-class head
        output = torch.softmax(output, dim=1)
        output = torch.argmax(output, dim=1)
    else:  # binary head
        output = torch.sigmoid(output) > 0.5
        output = output.long().squeeze(1)

    precision_per_class = []
    recall_per_class = []
    f1_per_class = []

    for class_idx in range(num_classes):
        if ignore_index is not None and class_idx == ignore_index:
            precision_per_class.append(float('nan'))
            recall_per_class.append(float('nan'))
            f1_per_class.append(float('nan'))
            continue

        output_class = (output == class_idx)
        target_class = (target == class_idx)

        tp = (output_class & target_class).float().sum().item()
        fp = (output_class & ~target_class).float().sum().item()
        fn = (~output_class & target_class).float().sum().item()

        # Precision undefined with no positive predictions.
        if tp + fp == 0:
            precision = float('nan')
        else:
            precision = tp / (tp + fp + smooth)

        # Recall undefined with no positive labels.
        if tp + fn == 0:
            recall = float('nan')
        else:
            recall = tp / (tp + fn + smooth)

        # F1 undefined when either component is, or when both are zero.
        if np.isnan(precision) or np.isnan(recall) or (precision + recall == 0):
            f1 = float('nan')
        else:
            f1 = 2 * (precision * recall) / (precision + recall + smooth)

        precision_per_class.append(precision)
        recall_per_class.append(recall)
        f1_per_class.append(f1)

    return precision_per_class, recall_per_class, f1_per_class