import numpy as np
from sklearn.metrics import confusion_matrix, normalized_mutual_info_score, accuracy_score, f1_score, adjusted_rand_score
from scipy.optimize import linear_sum_assignment
from typing import List, Tuple

def align_labels_with_hungarian(labels_true: np.ndarray, labels_pred: np.ndarray) -> np.ndarray:
    """
    Align predicted cluster labels to true labels using the Hungarian algorithm.

    Finds the one-to-one cluster-to-class assignment that maximizes total
    agreement, then relabels the predictions accordingly.

    Bug fix: `linear_sum_assignment` returns *indices* into the contingency
    matrix, whose rows/columns are ordered by the sorted union of label
    values. The previous code used those indices directly as labels, which
    was only correct when labels happened to be contiguous 0..k-1. Indices
    are now translated back through the sorted class array, so arbitrary
    (e.g. non-contiguous) label values are aligned correctly.

    Args:
        labels_true (np.ndarray): True labels.
        labels_pred (np.ndarray): Predicted cluster labels.

    Returns:
        np.ndarray: Aligned predicted labels.
    """
    labels_true = np.asarray(labels_true)
    labels_pred = np.asarray(labels_pred)

    # Sorted union of all label values; defines row/column order of cmat.
    classes = np.unique(np.concatenate([labels_true, labels_pred]))
    k = classes.size

    # Contingency table: cmat[i, j] = #samples with true class classes[i]
    # and predicted cluster classes[j]. Equivalent to sklearn's
    # confusion_matrix(labels_true, labels_pred, labels=classes).
    true_idx = np.searchsorted(classes, labels_true)
    pred_idx = np.searchsorted(classes, labels_pred)
    cmat = np.bincount(true_idx * k + pred_idx, minlength=k * k).reshape(k, k)

    # Maximizing agreement == minimizing the negated contingency table.
    row_ind, col_ind = linear_sum_assignment(-cmat)

    # Map actual predicted label values -> actual true label values.
    label_mapping = {classes[c]: classes[r] for r, c in zip(row_ind, col_ind)}

    # Labels without an assigned partner pass through unchanged.
    return np.array([label_mapping.get(label, label) for label in labels_pred])

def calculate_pur(confusion_matrix: np.ndarray) -> float:
    """
    Compute the Purity (PUR) metric from a confusion matrix.

    Each predicted cluster (column) contributes the count of its dominant
    true class; purity is that total divided by the number of samples.

    Args:
        confusion_matrix (np.ndarray): Confusion matrix with predicted
            clusters along the columns.

    Returns:
        float: The PUR score in [0, 1].
    """
    # Count of the single most frequent true class within each cluster.
    dominant_counts = confusion_matrix.max(axis=0)
    # Total samples: summing per-cluster sizes equals summing the whole matrix.
    total = np.sum(confusion_matrix.sum(axis=0))
    return np.sum(dominant_counts) / total

def cal_metrics(true_label: np.ndarray, pred_label: np.ndarray) -> Tuple[float, float, float, float, float]:
    """
    Calculate various clustering evaluation metrics.

    Accuracy and macro-F1 are computed after aligning predicted cluster
    labels to the true labels with the Hungarian algorithm; NMI and ARI
    are permutation-invariant and use the raw predictions.

    Args:
        true_label (np.ndarray): True labels.
        pred_label (np.ndarray): Predicted cluster labels.

    Returns:
        Tuple[float, float, float, float, float]: (accuracy, NMI, macro-F1, ARI, PUR).

    Raises:
        ValueError: If the two label arrays differ in length.
    """
    if len(true_label) != len(pred_label):
        raise ValueError("The lengths of true_label and pred_label must be the same.")

    # NMI and ARI do not depend on label alignment.
    nmi = normalized_mutual_info_score(true_label, pred_label)
    ari = adjusted_rand_score(true_label, pred_label)

    new_predict = align_labels_with_hungarian(true_label, pred_label)

    acc = accuracy_score(true_label, new_predict)
    f1 = f1_score(true_label, new_predict, average='macro')

    # Calculate PUR from the aligned predictions.
    cmat = confusion_matrix(true_label, new_predict)
    pur = calculate_pur(cmat)

    # Bug fix: PUR was computed but omitted from the return, contradicting
    # the documented and annotated 5-tuple.
    return acc, nmi, f1, ari, pur