import torch
import numpy as np
from sklearn.metrics import confusion_matrix


def calculate_dice(pred, target, num_classes):
    """Compute the per-class Dice coefficient.

    Args:
        pred: integer tensor of predicted class labels.
        target: integer tensor of ground-truth labels, same shape as ``pred``.
        num_classes: number of classes to score.

    Returns:
        ``np.ndarray`` of shape ``(num_classes,)`` with one Dice score per
        class. A class absent from both tensors scores a perfect 1.0.
    """
    scores = []
    for cls in range(num_classes):
        # Binary masks for the current class.
        pred_mask = (pred == cls).float()
        target_mask = (target == cls).float()

        overlap = (pred_mask * target_mask).sum()
        total = pred_mask.sum() + target_mask.sum()

        # Guard against division by zero: a class missing from both the
        # prediction and the ground truth counts as a perfect match.
        score = 1.0 if total == 0 else (2. * overlap / total).item()
        scores.append(score)

    return np.array(scores)


def calculate_iou(pred, target, num_classes):
    """Compute the per-class IoU (Jaccard index).

    Args:
        pred: integer tensor of predicted class labels.
        target: integer tensor of ground-truth labels, same shape as ``pred``.
        num_classes: number of classes to score.

    Returns:
        ``np.ndarray`` of shape ``(num_classes,)`` with one IoU per class.
        A class absent from both tensors scores a perfect 1.0.
    """
    scores = []
    for cls in range(num_classes):
        pred_mask = (pred == cls).float()
        target_mask = (target == cls).float()

        overlap = (pred_mask * target_mask).sum()
        combined = pred_mask.sum() + target_mask.sum() - overlap

        # Empty union means the class appears nowhere; treat as perfect.
        score = 1.0 if combined == 0 else (overlap / combined).item()
        scores.append(score)

    return np.array(scores)


def calculate_accuracy(pred, target):
    """Return the pixel accuracy: fraction of positions where pred == target."""
    matches = torch.eq(pred, target).sum().item()
    return matches / target.numel()


def calculate_precision_recall_f1(pred, target, num_classes):
    """Compute per-class precision, recall and F1 score.

    Args:
        pred: integer tensor of predicted class labels.
        target: integer tensor of ground-truth labels, same shape as ``pred``.
        num_classes: number of classes to score.

    Returns:
        Tuple of three ``np.ndarray`` of shape ``(num_classes,)``:
        ``(precision, recall, f1)``. Undefined ratios (zero denominator)
        are reported as 0.0.
    """
    precisions, recalls, f1s = [], [], []

    for cls in range(num_classes):
        pred_hit = pred == cls
        target_hit = target == cls

        tp = (pred_hit & target_hit).sum().item()
        fp = (pred_hit & ~target_hit).sum().item()
        fn = (~pred_hit & target_hit).sum().item()

        prec = tp / (tp + fp) if tp + fp > 0 else 0.0
        rec = tp / (tp + fn) if tp + fn > 0 else 0.0
        f1 = 2 * prec * rec / (prec + rec) if prec + rec > 0 else 0.0

        precisions.append(prec)
        recalls.append(rec)
        f1s.append(f1)

    return np.array(precisions), np.array(recalls), np.array(f1s)


def get_confusion_matrix(pred, target, num_classes):
    """Compute the confusion matrix for one prediction/target pair.

    Rows index the true class, columns the predicted class, matching
    ``sklearn.metrics.confusion_matrix(target, pred, labels=range(num_classes))``,
    but computed directly with numpy so no third-party dependency is needed.

    Args:
        pred: integer tensor of predicted class labels.
        target: integer tensor of ground-truth labels, same shape as ``pred``.
        num_classes: number of classes (labels are 0..num_classes-1).

    Returns:
        ``np.ndarray`` of shape ``(num_classes, num_classes)`` with int counts.
    """
    # reshape(-1) instead of view(-1): view() raises on non-contiguous
    # tensors (e.g. the result of a permute/transpose).
    pred_flat = pred.reshape(-1).cpu().numpy()
    target_flat = target.reshape(-1).cpu().numpy()

    # Drop any pixel whose label falls outside [0, num_classes), mirroring
    # sklearn's behavior when an explicit `labels` list is given.
    valid = (
        (pred_flat >= 0) & (pred_flat < num_classes)
        & (target_flat >= 0) & (target_flat < num_classes)
    )
    # Encode each (true, pred) pair as a single index and histogram it.
    pair_index = num_classes * target_flat[valid] + pred_flat[valid]
    cm = np.bincount(pair_index, minlength=num_classes ** 2)
    return cm.reshape(num_classes, num_classes)


def evaluate_metrics(pred_masks, true_masks, num_classes):
    """Aggregate all segmentation metrics over a batch of mask pairs.

    Args:
        pred_masks: iterable of predicted label tensors.
        true_masks: iterable of ground-truth label tensors (same order/shape).
        num_classes: number of classes to score.

    Returns:
        Dict with per-class means for "dice", "iou", "precision", "recall",
        "f1_score" (lists of floats), a scalar mean "accuracy", and the
        summed "confusion_matrix" as a nested list.
    """
    dice_all, iou_all, acc_all = [], [], []
    prec_all, rec_all, f1_all = [], [], []
    cm_total = np.zeros((num_classes, num_classes), dtype=int)

    for pred, target in zip(pred_masks, true_masks):
        # The per-sample metrics expect integer label tensors on the CPU.
        pred = pred.long().cpu()
        target = target.long().cpu()

        dice_all.append(calculate_dice(pred, target, num_classes))
        iou_all.append(calculate_iou(pred, target, num_classes))
        acc_all.append(calculate_accuracy(pred, target))

        prec, rec, f1 = calculate_precision_recall_f1(pred, target, num_classes)
        prec_all.append(prec)
        rec_all.append(rec)
        f1_all.append(f1)

        cm_total += get_confusion_matrix(pred, target, num_classes)

    return {
        "dice": np.mean(dice_all, axis=0).tolist(),
        "iou": np.mean(iou_all, axis=0).tolist(),
        "accuracy": np.mean(acc_all).item(),
        "precision": np.mean(prec_all, axis=0).tolist(),
        "recall": np.mean(rec_all, axis=0).tolist(),
        "f1_score": np.mean(f1_all, axis=0).tolist(),
        "confusion_matrix": cm_total.tolist(),
    }


if __name__ == "__main__":
    # Quick smoke test on random masks.
    num_classes = 3

    # Fake a single prediction/ground-truth pair.
    pred = torch.randint(0, num_classes, (1, 10, 10))
    target = torch.randint(0, num_classes, (1, 10, 10))

    print("--- 单个样本测试 ---")
    single_pred, single_target = pred[0], target[0]
    dice = calculate_dice(single_pred, single_target, num_classes)
    iou = calculate_iou(single_pred, single_target, num_classes)
    acc = calculate_accuracy(single_pred, single_target)
    prec, rec, f1 = calculate_precision_recall_f1(single_pred, single_target, num_classes)
    cm = get_confusion_matrix(single_pred, single_target, num_classes)

    print(f"Dice: {dice}")
    print(f"IoU: {iou}")
    print(f"Accuracy: {acc}")
    print(f"Precision: {prec}")
    print(f"Recall: {rec}")
    print(f"F1 Score: {f1}")
    print(f"Confusion Matrix:\n{cm}")

    print("\n--- 批量样本测试 ---")
    pred_batch = [torch.randint(0, num_classes, (10, 10)) for _ in range(5)]
    target_batch = [torch.randint(0, num_classes, (10, 10)) for _ in range(5)]

    metrics_results = evaluate_metrics(pred_batch, target_batch, num_classes)
    for name, value in metrics_results.items():
        print(f"{name}: {value}")

