import torch
from sklearn.metrics import precision_score, recall_score, f1_score
import numpy as np

def calculate_precision_recall_f1(outputs, labels, average='macro'):
    """
    Compute precision, recall and F1 score from model logits.

    Args:
        outputs: model output logits, shape (batch, num_classes).
        labels: ground-truth class-index tensor, shape (batch,).
        average: averaging mode — 'macro', 'micro', 'weighted' or 'binary'.

    Returns:
        precision, recall, f1: the three scores as floats.
    """
    # Predicted class is the argmax over the class dimension.
    predictions = torch.argmax(outputs, dim=1).cpu().numpy()
    targets = labels.cpu().numpy()

    # Delegate the actual metric math to sklearn; zero_division=0 avoids
    # warnings/NaNs when a class never appears in the predictions.
    precision, recall, f1 = (
        metric(targets, predictions, average=average, zero_division=0)
        for metric in (precision_score, recall_score, f1_score)
    )

    return precision, recall, f1

def calculate_accuracy(outputs, labels):
    """
    Compute classification accuracy from model logits.

    Args:
        outputs: model output logits, shape (batch, num_classes).
        labels: ground-truth class-index tensor, shape (batch,).

    Returns:
        float: fraction of samples whose argmax prediction matches the
        label; 0.0 for an empty batch (avoids ZeroDivisionError).
    """
    total = labels.size(0)
    # Guard the empty-batch case: the original division would raise
    # ZeroDivisionError when total == 0.
    if total == 0:
        return 0.0
    _, preds = torch.max(outputs, 1)
    correct = (preds == labels).sum().item()
    return correct / total


def calculate_top_k_accuracy(outputs, labels, k=5):
    """
    Compute top-k accuracy: a sample counts as correct if its true label
    is among the k highest-scoring classes.

    Args:
        outputs: model output logits, shape (batch, num_classes).
        labels: ground-truth class-index tensor, shape (batch,).
        k: number of top predictions to consider (default 5). Clamped to
           the number of classes, since torch.topk raises when
           k > num_classes.

    Returns:
        float: fraction of samples whose label appears in the top-k set.
    """
    # Clamp k so a small output space (e.g. binary head with k=5) does not
    # raise; results for valid k are unchanged.
    k = min(k, outputs.size(1))
    top_k_preds = torch.topk(outputs, k, dim=1).indices
    # Broadcast labels to (batch, k) and check membership per row.
    correct = top_k_preds.eq(labels.view(-1, 1).expand_as(top_k_preds))
    return correct.any(dim=1).float().mean().item()


def calculate_confusion_matrix(outputs, labels, num_classes):
    """
    Compute the confusion matrix from model logits.

    Args:
        outputs: model output logits, shape (batch, num_classes).
        labels: ground-truth class-index tensor, shape (batch,).
        num_classes: number of classes (matrix dimension).

    Returns:
        torch.Tensor: float matrix of shape (num_classes, num_classes)
        where entry [t, p] counts samples with true class t predicted as p.
    """
    _, preds = torch.max(outputs, 1)
    # Vectorized replacement for the per-sample Python loop: encode each
    # (true, pred) pair as a single flat index, count with bincount, then
    # reshape. Identical result, runs in native code.
    flat = labels.view(-1).long() * num_classes + preds.view(-1).long()
    conf_matrix = torch.bincount(flat, minlength=num_classes * num_classes)
    # .float() matches the original torch.zeros(...) default dtype.
    return conf_matrix.reshape(num_classes, num_classes).float()
