"""
Evaluation metrics for sleep stage classification
"""
import numpy as np
import torch
from sklearn.metrics import accuracy_score, confusion_matrix, f1_score, precision_score, recall_score, cohen_kappa_score
import pandas as pd


def calculate_overall_metrics(y_true, y_pred, num_classes=5):
    """
    Compute summary classification metrics over all samples.

    Args:
        y_true (numpy.ndarray): Ground truth labels
        y_pred (numpy.ndarray): Predicted labels
        num_classes (int): Number of classes

    Returns:
        dict: Keys 'accuracy', 'macro_f1', 'weighted_f1' and 'kappa'
    """
    # Pass an explicit label set so the averaged metrics stay well-defined
    # even when some classes never occur in y_true or y_pred.
    label_set = np.arange(num_classes)

    return {
        'accuracy': accuracy_score(y_true, y_pred),
        'macro_f1': f1_score(y_true, y_pred, labels=label_set, average='macro', zero_division=0),
        'weighted_f1': f1_score(y_true, y_pred, labels=label_set, average='weighted', zero_division=0),
        'kappa': cohen_kappa_score(y_true, y_pred, labels=label_set),
    }


def calculate_per_class_metrics(y_true, y_pred, num_classes=5):
    """
    Calculate per-class precision, recall and F1-score.

    Args:
        y_true (numpy.ndarray): Ground truth labels
        y_pred (numpy.ndarray): Predicted labels
        num_classes (int): Number of classes

    Returns:
        pandas.DataFrame: One row per class with columns
            'Class', 'Precision', 'Recall', 'F1-Score'
    """
    # Explicit label list keeps the metric arrays aligned and sized to
    # num_classes even when some classes are absent from the data.
    labels = np.arange(num_classes)

    precision = precision_score(y_true, y_pred, labels=labels, average=None, zero_division=0)
    recall = recall_score(y_true, y_pred, labels=labels, average=None, zero_division=0)
    f1 = f1_score(y_true, y_pred, labels=labels, average=None, zero_division=0)

    # Fix: the sleep-stage names only apply to the standard 5-class setup.
    # Previously any other num_classes crashed here with a DataFrame
    # length-mismatch; fall back to generic names instead.
    if num_classes == 5:
        class_names = ['W', 'N1', 'N2', 'N3', 'REM']
    else:
        class_names = [f'Class {i}' for i in range(num_classes)]

    return pd.DataFrame({
        'Class': class_names,
        'Precision': precision,
        'Recall': recall,
        'F1-Score': f1
    })


def calculate_confusion_matrix(y_true, y_pred, normalize=None, num_classes=5):
    """
    Calculate the confusion matrix, optionally normalized.

    Args:
        y_true (numpy.ndarray): Ground truth labels
        y_pred (numpy.ndarray): Predicted labels
        normalize (str, optional): Normalization method ('true', 'pred',
            'all', or None for raw counts)
        num_classes (int): Number of classes

    Returns:
        numpy.ndarray: (num_classes, num_classes) confusion matrix;
            integer counts when normalize is None, floats otherwise
    """
    # Explicit label list forces a num_classes x num_classes matrix even
    # when some classes are absent from the data.
    labels = np.arange(num_classes)

    cm = confusion_matrix(y_true, y_pred, labels=labels)

    # Fix: a class absent from y_true ('true') or y_pred ('pred') gives a
    # zero row/column sum. Suppress the divide-by-zero warnings and map the
    # resulting NaN entries to 0 (same convention sklearn uses internally).
    with np.errstate(divide='ignore', invalid='ignore'):
        if normalize == 'true':
            cm = np.nan_to_num(cm.astype('float') / cm.sum(axis=1, keepdims=True))
        elif normalize == 'pred':
            cm = np.nan_to_num(cm.astype('float') / cm.sum(axis=0, keepdims=True))
        elif normalize == 'all':
            cm = np.nan_to_num(cm.astype('float') / cm.sum())

    return cm


def evaluate_model(model, dataloader, device, mode='feature'):
    """
    Run the model over a dataloader and collect labels, predictions and logits.

    Args:
        model (torch.nn.Module): Model to evaluate
        dataloader (torch.utils.data.DataLoader): DataLoader for evaluation
        device (torch.device): Device to use
        mode (str): Model mode ('feature', 'sequence', 'full')

    Returns:
        tuple: (y_true, y_pred, all_logits) as numpy arrays
    """
    model.eval()
    truths, preds, logit_rows = [], [], []

    with torch.no_grad():
        for batch in dataloader:
            # Select the input tensor, ground truth and forward call that
            # the requested mode expects.
            if mode == 'feature':
                # Feature-extraction mode: model returns logits directly.
                labels = batch['label'].to(device)
                logits = model(batch['eeg'].to(device), mode='feature')
            elif mode == 'sequence':
                # Sequence mode consumes pre-extracted features; the model
                # returns a 3-tuple of which only the logits are needed.
                labels = batch['target'].to(device)
                logits, _, _ = model(batch['features'].to(device), mode='sequence')
            elif mode == 'full':
                # End-to-end mode over raw EEG sequences.
                labels = batch['target'].to(device)
                logits, _, _ = model(batch['eeg_seq'].to(device), mode='full')
            else:
                # Unrecognised mode: collect nothing for this batch.
                continue

            truths.extend(labels.cpu().numpy())
            preds.extend(logits.argmax(dim=1).cpu().numpy())
            logit_rows.extend(logits.cpu().numpy())

    return np.array(truths), np.array(preds), np.array(logit_rows)


def generate_evaluation_report(y_true, y_pred, class_names=('W', 'N1', 'N2', 'N3', 'REM')):
    """
    Generate a comprehensive evaluation report.

    Args:
        y_true (numpy.ndarray): Ground truth labels
        y_pred (numpy.ndarray): Predicted labels
        class_names (sequence): Class names; its length determines the
            number of classes. Default is an immutable tuple — a mutable
            list default is shared across calls and is a known pitfall.

    Returns:
        dict: Report with keys 'overall', 'per_class', 'confusion_matrix',
            'confusion_matrix_normalized' and 'n_samples'
    """
    num_classes = len(class_names)

    # Overall metrics (accuracy, macro/weighted F1, kappa)
    overall_metrics = calculate_overall_metrics(y_true, y_pred, num_classes)

    # Per-class metrics, re-keyed by class name for easy lookup
    per_class_df = calculate_per_class_metrics(y_true, y_pred, num_classes)
    per_class_metrics = {}
    for i, cls in enumerate(class_names):
        per_class_metrics[cls] = {
            'precision': per_class_df.loc[i, 'Precision'],
            'recall': per_class_df.loc[i, 'Recall'],
            'f1': per_class_df.loc[i, 'F1-Score']
        }

    # Raw-count and row-normalized (recall-style) confusion matrices
    cm = calculate_confusion_matrix(y_true, y_pred, num_classes=num_classes)
    cm_normalized = calculate_confusion_matrix(y_true, y_pred, normalize='true', num_classes=num_classes)

    return {
        'overall': overall_metrics,
        'per_class': per_class_metrics,
        'confusion_matrix': cm,
        'confusion_matrix_normalized': cm_normalized,
        'n_samples': len(y_true)
    }


def print_evaluation_report(report, class_names=('W', 'N1', 'N2', 'N3', 'REM')):
    """
    Print an evaluation report in a readable format.

    Args:
        report (dict): Evaluation report from generate_evaluation_report
        class_names (sequence): Class names in label order. Default is an
            immutable tuple — a mutable list default is shared across
            calls and is a known pitfall.
    """
    print("\n----- Evaluation Report -----")
    print(f"Number of samples: {report['n_samples']}")

    print("\n--- Overall Metrics ---")
    print(f"Accuracy: {report['overall']['accuracy']:.4f}")
    print(f"Macro F1-score: {report['overall']['macro_f1']:.4f}")
    print(f"Weighted F1-score: {report['overall']['weighted_f1']:.4f}")
    print(f"Cohen's Kappa: {report['overall']['kappa']:.4f}")

    print("\n--- Per-Class Metrics ---")

    # Fixed-width table: 8 chars for the class column, 12 for each metric
    headers = ['Class', 'Precision', 'Recall', 'F1-Score']
    row_format = "{:>8}" + "{:>12}" * (len(headers) - 1)
    print(row_format.format(*headers))

    for cls in class_names:
        metrics = report['per_class'][cls]
        print(row_format.format(
            cls,
            f"{metrics['precision']:.4f}",
            f"{metrics['recall']:.4f}",
            f"{metrics['f1']:.4f}"
        ))

    print("\n--- Confusion Matrix ---")
    cm = report['confusion_matrix']
    print("     " + " ".join(f"{cls:>7}" for cls in class_names))
    for i, cls in enumerate(class_names):
        print(f"{cls:>5}" + " ".join(f"{cm[i, j]:>7}" for j in range(len(class_names))))

    print("\n--- Normalized Confusion Matrix (by row) ---")
    cm_norm = report['confusion_matrix_normalized']
    print("     " + " ".join(f"{cls:>7}" for cls in class_names))
    for i, cls in enumerate(class_names):
        print(f"{cls:>5}" + " ".join(f"{cm_norm[i, j]:>7.4f}" for j in range(len(class_names))))

    print("\n------------------------------")