from pathlib import Path

import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from scipy import stats
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, confusion_matrix, f1_score,
                             recall_score, roc_auc_score, roc_curve)
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from statsmodels.stats.proportion import proportion_confint


def calculate_metrics_with_ci(y_true, y_pred, y_score, n_bootstrap=2000, alpha=0.05):
    # basic metrics
    acc = accuracy_score(y_true, y_pred)
    sens = recall_score(y_true, y_pred, average='binary')
    
    tn, fp, fn, tp = confusion_matrix(y_true, y_pred).ravel()
    spec = tn / (tn + fp)

    # CI for Accuracy
    acc_ci = proportion_confint(int((y_true == y_pred).sum()), len(y_true), alpha=alpha, method='beta')
    
    # CI for Sensitivity
    sens_ci = proportion_confint(tp, tp + fn, alpha=alpha, method='beta')
    
    # CI for Specificity
    spec_ci = proportion_confint(tn, tn + fp, alpha=alpha, method='beta')
    
    # CI for AUC
    auc = roc_auc_score(y_true, y_score)
    boot_auc = []
    rng = np.random.RandomState(42)
    for _ in range(n_bootstrap):
        indices = rng.randint(0, len(y_true), len(y_true))
        if len(np.unique(y_true[indices])) < 2:
            continue
        boot_auc.append(roc_auc_score(y_true[indices], y_score[indices]))
    boot_auc = np.sort(boot_auc)
    auc_ci = (
        boot_auc[int((alpha / 2) * len(boot_auc))],
        boot_auc[int((1 - alpha / 2) * len(boot_auc))]
    )
    
    # CI for F1
    f1 = f1_score(y_true, y_pred, average='binary')
    boot_f1 = []
    for _ in range(n_bootstrap):
        indices = rng.randint(0, len(y_true), len(y_true))
        if len(np.unique(y_true[indices])) < 2:
            continue
        boot_f1.append(f1_score(y_true[indices], y_pred[indices]))
    boot_f1 = np.sort(boot_f1)
    f1_ci = (
        boot_f1[int((alpha / 2) * len(boot_f1))],
        boot_f1[int((1 - alpha / 2) * len(boot_f1))]
    )

    return {
        "accuracy": (acc, acc_ci),
        "sensitivity": (sens, sens_ci),
        "specificity": (spec, spec_ci),
        "auc": (auc, auc_ci),
        "f1": (f1, f1_ci),
    }

def plot_roc(y_true, y_score):
    """Display the ROC curve for a binary classifier, with the AUC in the legend."""
    false_pos_rate, true_pos_rate, _ = roc_curve(y_true, y_score)
    area = roc_auc_score(y_true, y_score)

    fig, ax = plt.subplots()
    ax.plot(false_pos_rate, true_pos_rate, label=f'AUC = {area:.3f}')
    # Diagonal chance line for reference.
    ax.plot([0, 1], [0, 1], 'k--', label='Random')
    ax.set_xlabel('False Positive Rate')
    ax.set_ylabel('True Positive Rate')
    ax.set_title('Receiver Operating Characteristic (ROC) Curve')
    ax.legend(loc='lower right')
    ax.grid(True)
    fig.tight_layout()
    plt.show()

def plot_confusion_matrix(y_true, y_pred, labels=None):
    """Display the confusion matrix as an annotated heatmap.

    ``labels`` optionally provides tick labels for both axes.
    """
    matrix = confusion_matrix(y_true, y_pred)

    fig, ax = plt.subplots(figsize=(6, 5))
    sns.heatmap(matrix, annot=True, fmt="d", cmap="Blues",
                xticklabels=labels, yticklabels=labels, ax=ax)
    ax.set_xlabel("Predicted Label")
    ax.set_ylabel("True Label")
    ax.set_title("Confusion Matrix")
    fig.tight_layout()
    plt.show()

def main(args):
    """Train an age+sex logistic-regression baseline and evaluate it on a test set.

    Fixes a latent bug: the parameter was previously named ``ages`` while the
    body read the module-level global ``args`` — that only worked when run as
    a script. The call site passes the Namespace positionally, so renaming is
    backward-compatible.

    Parameters
    ----------
    args : argparse.Namespace with attributes ``train_csv``, ``test_csv``
        and ``label_col``.
    """
    age_col = 'RestingECG/PatientDemographics/PatientAge'
    gender_col = 'RestingECG/PatientDemographics/Gender'

    # Load data
    train_df = pd.read_csv(args.train_csv)
    test_df = pd.read_csv(args.test_csv)

    # Encode gender as binary; values outside the mapping become NaN, which
    # surfaces as a fit-time error rather than silently training on garbage.
    gender_map = {'MALE': 1, 'FEMALE': 0}
    train_df[gender_col] = train_df[gender_col].map(gender_map)
    test_df[gender_col] = test_df[gender_col].map(gender_map)

    feature_cols = [age_col, gender_col]
    X_train = train_df[feature_cols]
    y_train = train_df[args.label_col]
    X_test = test_df[feature_cols]

    # Standardization + logistic regression
    pipeline = Pipeline([
        ('scaler', StandardScaler()),
        ('logreg', LogisticRegression(solver='saga', max_iter=1000, n_jobs=-1))
    ])

    # Train the model
    pipeline.fit(X_train, y_train)

    # Save the model (create the output directory first — joblib.dump fails
    # if it does not exist).
    model_path = Path('DeepECG/performance_test/regression_saved_model/logistic_model.pkl')
    model_path.parent.mkdir(parents=True, exist_ok=True)
    joblib.dump(pipeline, model_path)

    # Test
    y_pred_proba = pipeline.predict_proba(X_test)[:, 1]
    y_pred_label = pipeline.predict(X_test)
    y_trues = test_df[args.label_col].to_numpy()

    # Save test results
    test_df['y_pred_proba'] = y_pred_proba
    test_df['y_pred_label'] = y_pred_label
    # test_df.to_csv('test_with_predictions.csv', index=False)

    results = calculate_metrics_with_ci(y_trues, y_pred_label, y_pred_proba)
    for metric, (val, ci) in results.items():
        print(f"{metric.capitalize()}: {val:.3f} (95% CI: {ci[0]:.3f} – {ci[1]:.3f})")

    plot_roc(y_trues, y_pred_proba)
    plot_confusion_matrix(y_trues, y_pred_label, labels=["No AF", "New-onset AF"])


if __name__ == '__main__':
    import argparse

    # CLI: data file locations and the binary label column to predict.
    arg_parser = argparse.ArgumentParser()
    cli_options = (
        ('--train_csv', 'DeepECG/dataset/alpine/train_31_day.csv'),
        ('--test_csv', 'DeepECG/dataset/alpine/all_test_31_day.csv'),
        ('--label_col', 'AF_31days'),
    )
    for flag, default_value in cli_options:
        arg_parser.add_argument(flag, type=str, default=default_value)
    args = arg_parser.parse_args()

    main(args)