import pandas as pd
from sklearn.metrics import (
    accuracy_score,
    precision_score,
    recall_score,
    f1_score,
    roc_auc_score,
    confusion_matrix,
    roc_curve,
    precision_recall_curve,
    auc
)
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
import os

# Evaluate the performance of the OOF (out-of-fold) prediction table.
# Chinese font settings are removed/commented out.
# plt.rcParams['font.sans-serif'] = ['YourChoiceOfEnglishFont'] # Usually not needed, default is fine
# plt.rcParams['axes.unicode_minus'] = False # Still good to keep for minus sign
# --- Font setting end ---


def load_oof_predictions(file_path):
    """
    Load an out-of-fold (OOF) prediction CSV into a DataFrame.

    Args:
        file_path (str): Path to the CSV file.

    Returns:
        pandas.DataFrame or None: DataFrame containing 'true_label' and
        'prediction_prob' columns, or None if loading or validation fails.
    """
    try:
        data = pd.read_csv(file_path)
        required = ('true_label', 'prediction_prob')
        # Both columns are mandatory downstream; reject the file otherwise.
        if any(col not in data.columns for col in required):
            raise ValueError("CSV file must contain 'true_label' and 'prediction_prob' columns.")
    except FileNotFoundError:
        print(f"Error: File '{file_path}' not found.")
        return None
    except Exception as e:
        # Covers parse errors and the schema check above.
        print(f"Error loading data: {e}")
        return None
    print(f"Successfully loaded data from '{file_path}', {len(data)} records.")
    return data

def evaluate_metrics(y_true, y_pred_prob, threshold=0.45, output_dir="evaluation_results"):
    """
    Calculate, print, and save classification metrics based on a given threshold.

    Args:
        y_true (array-like): True binary labels (0/1). Lists, Series, and
            ndarrays are all accepted.
        y_pred_prob (array-like): Predicted probabilities for the positive class.
        threshold (float): Threshold to convert probabilities to class predictions.
        output_dir (str): Directory to save the metrics CSV file.

    Returns:
        dict: Dictionary containing the metrics. ROC-AUC is NaN when y_true
            contains only one class (the score is undefined in that case).
        numpy.ndarray: Predicted labels after applying the threshold.
    """
    # Coerce to ndarrays so plain Python lists work too (a bare list has no
    # .astype, and `list >= float` raises TypeError).
    y_true = np.asarray(y_true)
    y_pred_prob = np.asarray(y_pred_prob)
    y_pred_label = (y_pred_prob >= threshold).astype(int)

    acc = accuracy_score(y_true, y_pred_label)
    precision = precision_score(y_true, y_pred_label, zero_division=0)
    recall = recall_score(y_true, y_pred_label, zero_division=0)
    f1 = f1_score(y_true, y_pred_label, zero_division=0)
    # roc_auc_score raises ValueError when only one class is present;
    # degrade gracefully to NaN instead of crashing the whole evaluation.
    if np.unique(y_true).size < 2:
        print("Warning: only one class present in y_true; ROC-AUC is undefined (reported as NaN).")
        roc_auc = float('nan')
    else:
        roc_auc = roc_auc_score(y_true, y_pred_prob)

    metrics = {
        "Threshold": threshold,
        "Accuracy": acc,
        "Precision": precision,
        "Recall": recall,
        "F1-score": f1,
        "ROC-AUC": roc_auc
    }

    print(f"\n--- Evaluating with Threshold: {threshold} ---")
    print(f"Accuracy: {acc:.4f}")
    print(f"Precision: {precision:.4f}")
    print(f"Recall: {recall:.4f}")
    print(f"F1-score: {f1:.4f}")
    print(f"ROC-AUC: {roc_auc:.4f}")

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created directory: {output_dir}")

    # One-row CSV per threshold; '.' is replaced so the value is filename-safe.
    metrics_df = pd.DataFrame([metrics])
    metrics_filename = f"evaluation_metrics_threshold_{str(threshold).replace('.', 'p')}.csv"
    metrics_save_path = os.path.join(output_dir, metrics_filename)
    try:
        metrics_df.to_csv(metrics_save_path, index=False)
        print(f"Evaluation metrics saved to: {metrics_save_path}")
    except Exception as e:
        # Best-effort save: report the failure but still return the metrics.
        print(f"Error saving metrics to CSV: {e}")

    return metrics, y_pred_label

def plot_confusion_matrix_custom(y_true, y_pred_label, output_dir, class_names=None, title='Confusion Matrix', threshold_val=0.45):
    """
    Plot, display, and save the confusion matrix as an annotated heatmap.

    Args:
        y_true (array-like): True labels.
        y_pred_label (array-like): Predicted labels.
        output_dir (str): Directory to save the image.
        class_names (list, optional): Names of the classes. Defaults to
            ['Negative (0)', 'Positive (1)'].
        title (str): Title of the plot.
        threshold_val (float): Threshold used for predictions, embedded in the filename.
    """
    labels = ['Negative (0)', 'Positive (1)'] if class_names is None else class_names

    matrix = confusion_matrix(y_true, y_pred_label)
    figure, axis = plt.subplots(figsize=(6, 5))
    sns.heatmap(matrix, annot=True, fmt='d', cmap='Blues',
                xticklabels=labels, yticklabels=labels,
                annot_kws={"size": 14}, ax=axis)
    axis.set_title(title, fontsize=16)
    axis.set_ylabel('True Label', fontsize=14)
    axis.set_xlabel('Predicted Label', fontsize=14)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.tight_layout()

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created directory: {output_dir}")

    # '.' in the threshold is replaced so the filename has a single extension.
    threshold_tag = str(threshold_val).replace('.', 'p')
    save_path = os.path.join(output_dir, f"confusion_matrix_threshold_{threshold_tag}.png")
    try:
        plt.savefig(save_path)
        print(f"Confusion matrix saved to: {save_path}")
    except Exception as e:
        print(f"Error saving confusion matrix image: {e}")
    plt.show()
    plt.close(figure)
    print("\nConfusion Matrix Values:")
    print(matrix)


def plot_roc_curve_custom(y_true, y_pred_prob, output_dir, title='ROC Curve'):
    """
    Plot, display, and save the ROC curve with its AUC shown in the legend.

    Args:
        y_true (array-like): True labels.
        y_pred_prob (array-like): Predicted probabilities for the positive class.
        output_dir (str): Directory to save the image.
        title (str): Title of the plot.
    """
    false_pos_rate, true_pos_rate, _ = roc_curve(y_true, y_pred_prob)
    area = auc(false_pos_rate, true_pos_rate)

    figure, axis = plt.subplots(figsize=(7, 6))
    axis.plot(false_pos_rate, true_pos_rate, color='darkorange', lw=2,
              label=f'ROC curve (area = {area:.2f})')
    # Diagonal reference line: the expected curve of a random classifier.
    axis.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    axis.set_xlim([0.0, 1.0])
    axis.set_ylim([0.0, 1.05])
    axis.set_xlabel('False Positive Rate (FPR)', fontsize=14)
    axis.set_ylabel('True Positive Rate (TPR)', fontsize=14)
    axis.set_title(title, fontsize=16)
    axis.legend(loc="lower right", fontsize=12)
    axis.grid(alpha=0.3)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.tight_layout()

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created directory: {output_dir}")

    save_path = os.path.join(output_dir, "roc_curve.png")
    try:
        plt.savefig(save_path)
        print(f"ROC curve saved to: {save_path}")
    except Exception as e:
        print(f"Error saving ROC curve image: {e}")
    plt.show()
    plt.close(figure)

def plot_precision_recall_curve_custom(y_true, y_pred_prob, output_dir, title='Precision-Recall Curve'):
    """
    Plot, display, and save the Precision-Recall curve with its AUC in the legend.

    Args:
        y_true (array-like): True labels.
        y_pred_prob (array-like): Predicted probabilities for the positive class.
        output_dir (str): Directory to save the image.
        title (str): Title of the plot.
    """
    prec_vals, rec_vals, _ = precision_recall_curve(y_true, y_pred_prob)
    # auc() integrates y over x, so recall is the x-axis here.
    area = auc(rec_vals, prec_vals)

    figure, axis = plt.subplots(figsize=(7, 6))
    axis.plot(rec_vals, prec_vals, color='blue', lw=2,
              label=f'PR curve (area = {area:.2f})')
    axis.set_xlabel('Recall', fontsize=14)
    axis.set_ylabel('Precision', fontsize=14)
    axis.set_title(title, fontsize=16)
    axis.set_ylim([0.0, 1.05])
    axis.set_xlim([0.0, 1.0])
    axis.legend(loc="lower left", fontsize=12)
    axis.grid(alpha=0.3)
    plt.xticks(fontsize=12)
    plt.yticks(fontsize=12)
    plt.tight_layout()

    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
        print(f"Created directory: {output_dir}")

    save_path = os.path.join(output_dir, "precision_recall_curve.png")
    try:
        plt.savefig(save_path)
        print(f"Precision-Recall curve saved to: {save_path}")
    except Exception as e:
        print(f"Error saving Precision-Recall curve image: {e}")
    plt.show()
    plt.close(figure)


if __name__ == "__main__":
    # --- Configuration ---
    oof_file_path = '/home/sunjun/ssd/dpp4/grid_search_outputs-test1/attention/run_1_att_dense_units_64_att_learning_rate_0p001_att_lstm_units_32/attention_oof_predictions.csv' # Your OOF filename
    classification_threshold = 0.45
    output_directory = "oof_evaluation_results_english_v1" # Output directory

    if not os.path.exists(output_directory):
        try:
            os.makedirs(output_directory)
            print(f"Successfully created output directory: {output_directory}")
        except Exception as e:
            print(f"Failed to create output directory {output_directory}: {e}")
            # exit()

    oof_df = load_oof_predictions(oof_file_path)

    if oof_df is not None:
        y_true_labels = oof_df['true_label']
        y_pred_probabilities = oof_df['prediction_prob']

        metrics_results, y_predicted_labels = evaluate_metrics(
            y_true_labels,
            y_pred_probabilities,
            threshold=classification_threshold,
            output_dir=output_directory
        )

        # --- Plot and save figures ---
        # 1. Confusion Matrix
        plot_confusion_matrix_custom(
            y_true_labels,
            y_predicted_labels,
            output_dir=output_directory,
            class_names=['Non-Resistant (0)', 'Resistant (1)'], # English class names
            title=f'Confusion Matrix (Threshold = {classification_threshold})',
            threshold_val=classification_threshold
        )

        # 2. ROC Curve
        plot_roc_curve_custom(
            y_true_labels,
            y_pred_probabilities,
            output_dir=output_directory,
            title='ROC Curve (based on OOF predictions)'
        )
        
        # 3. Precision-Recall Curve
        plot_precision_recall_curve_custom(
            y_true_labels,
            y_pred_probabilities,
            output_dir=output_directory,
            title='Precision-Recall Curve (based on OOF predictions)'
        )

        print("\nEvaluation and file saving completed.")
    else:
        print("Evaluation aborted due to data loading failure.")
