import numpy as np
import torch
from sklearn.metrics import precision_score, recall_score, f1_score, roc_auc_score, confusion_matrix
from tqdm import tqdm


def evaluate_landmark_metrics(y_pred, y, threshold=0.5):
    """
    Evaluate binary-classification metrics for landmark predictions.

    Args:
        y_pred: predicted probabilities (array-like of floats).
        y: true binary labels (array-like of 0/1).
        threshold: probability cutoff for binarizing y_pred (default 0.5).

    Returns:
        result_metrics: dict with keys
            {accuracy, precision, recall, f1_score, auroc, specificity}
    """
    # Accept plain lists / array-likes in addition to np.ndarray.
    y_pred = np.asarray(y_pred)
    true_classes = np.asarray(y)

    # Binarize probabilities at the given threshold (strictly greater,
    # matching the original behavior).
    predicted_classes = (y_pred > threshold).astype(int)

    accuracy = (predicted_classes == true_classes).sum().item() / len(true_classes)

    precision = precision_score(true_classes, predicted_classes, average='binary', zero_division=0)
    recall = recall_score(true_classes, predicted_classes, average='binary', zero_division=0)
    f1 = f1_score(true_classes, predicted_classes, average='binary', zero_division=0)

    # BUG FIX: roc_auc_score rejects average='binary' (it only accepts
    # None/'micro'/'macro'/'weighted'/'samples' and raises ValueError), so the
    # original call crashed on every invocation; for binary targets the
    # defaults are correct. AUROC is also undefined when y contains a single
    # class, so report NaN instead of propagating the ValueError.
    try:
        auroc = roc_auc_score(true_classes, y_pred)
    except ValueError:
        auroc = float('nan')

    # Pin labels=[0, 1] so the matrix is always 2x2 even when one class is
    # absent from y (otherwise confusion_matrix returns 1x1 and indexing fails).
    cm = confusion_matrix(true_classes, predicted_classes, labels=[0, 1])
    tn, fp = cm[0, 0], cm[0, 1]
    # Guard the division: specificity is undefined with zero actual negatives.
    specificity = tn / (tn + fp) if (tn + fp) > 0 else 0.0

    result_metrics = {
        "accuracy": accuracy,
        "precision": precision,
        "recall": recall,
        "f1_score": f1,
        "auroc": auroc,
        "specificity": specificity,
    }

    return result_metrics

