#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@File    : mrna_model_selecter.py
@Author  : Bing Liang
@Email   : believer19940901@gmail.com
@Date    : 2025/10/30
@Description :
mRNA多模型分类脚本，支持训练、评估、ROC和混淆矩阵绘制，并保存指标与参数
"""
import argparse
import warnings
from pathlib import Path

import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.ensemble import (
    RandomForestClassifier,
    GradientBoostingClassifier,
    ExtraTreesClassifier,
    AdaBoostClassifier,
)
from sklearn.linear_model import LogisticRegression, RidgeClassifier, SGDClassifier
from sklearn.metrics import (
    roc_curve,
    auc,
    f1_score,
    accuracy_score,
    precision_score,
    recall_score,
    confusion_matrix,
)
from sklearn.model_selection import StratifiedKFold, GridSearchCV, train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.svm import SVC

warnings.filterwarnings("ignore")


def find_best_threshold(y_true, y_score):
    """Return the score threshold that maximizes F1 for ``y_score >= t``.

    Candidate thresholds are taken from the observed scores rather than a
    fixed [0, 1] grid. This matters because some estimators used in this
    script (e.g. RidgeClassifier, or any decision_function fallback) emit
    unbounded margin scores, for which a probability-style [0, 1] grid is
    meaningless; unique score values cover every possible decision boundary
    regardless of the score range.

    Parameters
    ----------
    y_true : array-like of shape (n,)
        Binary ground-truth labels (0/1).
    y_score : array-like of shape (n,)
        Prediction scores — probabilities or raw decision margins.

    Returns
    -------
    float
        The threshold t for which F1 of ``(y_score >= t)`` is maximal
        (first maximum wins on ties).
    """
    y_score = np.asarray(y_score)
    # Every distinct score is a potential cut point; no other threshold can
    # produce a different labeling of this sample set.
    thresholds = np.unique(y_score)
    f1s = [f1_score(y_true, (y_score >= t).astype(int)) for t in thresholds]
    return thresholds[np.argmax(f1s)]


def _score_split(model, X_scaled, y_true, thr, dataset_name):
    """Compute thresholded classification metrics for one data split.

    Uses ``predict_proba`` when available, otherwise falls back to
    ``decision_function`` (e.g. RidgeClassifier). Returns a dict with the
    split name, accuracy/precision/recall/F1 at threshold ``thr``, and AUC.
    """
    y_score = (
        model.predict_proba(X_scaled)[:, 1]
        if hasattr(model, "predict_proba")
        else model.decision_function(X_scaled)
    )
    y_pred = (y_score >= thr).astype(int)
    fpr, tpr, _ = roc_curve(y_true, y_score)
    return {
        "dataset": dataset_name,
        "accuracy": accuracy_score(y_true, y_pred),
        "precision": precision_score(y_true, y_pred),
        "recall": recall_score(y_true, y_pred),
        "f1": f1_score(y_true, y_pred),
        "auc": auc(fpr, tpr),
    }


def save_model_results(final_models, final_thresholds, X_train, y_train, X_test, y_test, scaler, out_dir):
    """Save per-model train/test metrics and fitted parameters to CSV.

    Parameters
    ----------
    final_models : dict[str, estimator]
        Model name -> fitted estimator.
    final_thresholds : dict[str, float]
        Model name -> decision threshold chosen during model selection.
    X_train, y_train, X_test, y_test : array-like
        Unscaled feature matrices and binary labels for both splits.
    scaler : fitted StandardScaler
        Scaler used to transform features before scoring.
    out_dir : pathlib.Path
        Output directory; writes ``all_models_metrics.csv``.
    """
    # Scaling does not depend on the model — do it once, not once per model.
    X_train_scaled = scaler.transform(X_train)
    X_test_scaled = scaler.transform(X_test)

    results = []
    for model_name, model in final_models.items():
        thr = final_thresholds[model_name]
        params_str = ";".join(f"{k}={v}" for k, v in model.get_params().items())

        for metrics in (
            _score_split(model, X_train_scaled, y_train, thr, "train"),
            _score_split(model, X_test_scaled, y_test, thr, "test"),
        ):
            # Spell out "dataset" before **metrics so the CSV column order
            # stays model, dataset, best_threshold, <metrics>, parameters.
            results.append({
                "model": model_name,
                "dataset": metrics["dataset"],
                "best_threshold": thr,
                **metrics,
                "parameters": params_str,
            })

    pd.DataFrame(results).to_csv(out_dir / "all_models_metrics.csv", index=False)


def main(args):
    """Run the full model-selection pipeline.

    Steps: load expression/group/DEG tables, keep the top-N DEGs as features,
    split train/test, select each model's hyperparameters and decision
    threshold via nested cross-validation, then plot ROC curves and confusion
    matrices and save per-model metrics.

    ``args`` must carry: input, group, deg, top, outdir (see the CLI parser
    at the bottom of this file).
    """
    # Output directory (created if missing)
    out_dir = Path(args.outdir).resolve()
    out_dir.mkdir(parents=True, exist_ok=True)

    # =========================
    # 1. Load data
    # =========================
    # Expression matrix (genes x samples), sample->group table, and a DEG
    # table sorted by p-value trimmed to the top-N genes used as features.
    fpkm_df = pd.read_csv(args.input, sep="\t")
    group_df = pd.read_csv(args.group, sep="\t")
    diff_df = (
        pd.read_csv(args.deg, sep="\t", index_col=0)
        .sort_values(by="pvalue")
        .iloc[: args.top, :]
    )

    deg_genes = diff_df.index
    # Keep only the selected genes; rows become features after the transpose.
    fpkm_df = fpkm_df.query("Gene in @deg_genes", engine="python").set_index("Gene")
    X = fpkm_df.T.values  # rows = samples, columns = selected genes
    # Binary target aligned to the column (sample) order of the expression
    # matrix: Death -> 0, Survival -> 1.
    y = (
        group_df.set_index("Sample")
        .loc[fpkm_df.columns, "Group"]
        .map({"Death": 0, "Survival": 1})
        .values
    )

    # =========================
    # 2. Train/test split (stratified, fixed seed for reproducibility)
    # =========================
    X_train_full, X_test, y_train_full, y_test = train_test_split(
        X, y, test_size=0.25, stratify=y, random_state=42
    )

    # =========================
    # 3. Candidate models and their hyperparameter grids
    # =========================
    models_params = {
        "LogisticRegression": (
            LogisticRegression(max_iter=10000, class_weight="balanced"),
            {"C": [0.001, 0.01, 0.1, 1], "penalty": ["l2"], "solver": ["lbfgs"]},
        ),
        "RidgeClassifier": (
            RidgeClassifier(class_weight="balanced"),
            {"alpha": list(range(1, 1000, 50))},
        ),
        "SGDClassifier": (
            SGDClassifier(max_iter=5000, tol=1e-3, class_weight="balanced"),
            {
                "loss": ["log_loss"],
                "alpha": [0.01, 0.1, 1],
                "penalty": ["l2", "elasticnet"],
            },
        ),
        "SVC_linear": (
            SVC(probability=True, class_weight="balanced"),
            {"C": [0.01, 0.1, 1], "kernel": ["linear"]},
        ),
        "SVC_rbf": (
            SVC(probability=True, class_weight="balanced"),
            {"C": [0.01, 0.1, 1], "kernel": ["rbf"], "gamma": ["scale", "auto"]},
        ),
        "KNN": (
            KNeighborsClassifier(),
            {"n_neighbors": list(range(1, 11)), "weights": ["distance"], "p": [1, 2]},
        ),
        "RandomForest": (
            RandomForestClassifier(class_weight="balanced", random_state=42),
            {
                "n_estimators": [50, 100, 200],
                "max_depth": [2, 3, 5],
                "min_samples_split": [2, 5, 10],
            },
        ),
        "ExtraTrees": (
            ExtraTreesClassifier(class_weight="balanced", random_state=42),
            {
                "n_estimators": [50, 100, 200],
                "max_depth": [2, 3, 5],
                "min_samples_split": [2, 5, 10],
            },
        ),
        "GradientBoosting": (
            GradientBoostingClassifier(random_state=42),
            {
                "n_estimators": [50, 100, 200],
                "learning_rate": [0.01, 0.05, 0.1],
                "max_depth": [2, 3, 4],
            },
        ),
        "AdaBoost": (
            AdaBoostClassifier(random_state=42),
            {"n_estimators": [50, 100, 200], "learning_rate": [0.01, 0.05, 0.1]},
        ),
        "LDA": (LinearDiscriminantAnalysis(), {"solver": ["svd", "lsqr", "eigen"]}),
    }

    # =========================
    # 4. Nested CV: outer folds for selection, inner folds for grid search
    # =========================
    outer_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    inner_cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

    final_models = {}       # model name -> estimator from the best outer fold
    final_thresholds = {}   # model name -> F1-optimal threshold from that fold

    # Scaler fitted on the full training split; used for final evaluation
    # and for transforming the test set.
    scaler_full = StandardScaler()
    X_train_scaled_full = scaler_full.fit_transform(X_train_full)
    X_test_scaled = scaler_full.transform(X_test)

    for model_name, (model_class, param_grid) in models_params.items():
        print(f"\nProcessing model: {model_name}")
        outer_f1_scores = []
        outer_best_models = []
        outer_best_thr = []

        for train_idx, valid_idx in outer_cv.split(X_train_full, y_train_full):
            X_train, X_valid = X_train_full[train_idx], X_train_full[valid_idx]
            y_train, y_valid = y_train_full[train_idx], y_train_full[valid_idx]

            # Fit the scaler on the fold's training part only, so validation
            # statistics do not leak into the fold.
            scaler = StandardScaler()
            X_train_scaled = scaler.fit_transform(X_train)
            X_valid_scaled = scaler.transform(X_valid)

            # Inner CV grid search, optimized for F1.
            grid = GridSearchCV(
                model_class, param_grid, scoring="f1", cv=inner_cv, n_jobs=-1
            )
            grid.fit(X_train_scaled, y_train)
            best_model = grid.best_estimator_

            # Score the held-out fold; decision_function is the fallback for
            # estimators without predict_proba (e.g. RidgeClassifier).
            y_score_valid = (
                best_model.predict_proba(X_valid_scaled)[:, 1]
                if hasattr(best_model, "predict_proba")
                else best_model.decision_function(X_valid_scaled)
            )
            thr = find_best_threshold(y_valid, y_score_valid)
            y_pred_valid = (y_score_valid >= thr).astype(int)

            outer_f1_scores.append(f1_score(y_valid, y_pred_valid))
            outer_best_models.append(best_model)
            outer_best_thr.append(thr)

        # Keep the estimator/threshold from the best-scoring outer fold.
        # NOTE(review): this estimator was fitted on fold-scaled data, but
        # below it is evaluated on data scaled by scaler_full — the two
        # scalers are fitted on different sample subsets. Confirm this
        # mismatch is acceptable, or refit the winner on the full train set.
        best_idx = np.argmax(outer_f1_scores)
        final_models[model_name] = outer_best_models[best_idx]
        final_thresholds[model_name] = outer_best_thr[best_idx]

    # =========================
    # 5. ROC curves (training set and test set side by side)
    # =========================
    fig, axes = plt.subplots(1, 2, figsize=(16, 7))
    class_names = ["Death", "Survival"]

    for model_name, model in final_models.items():
        # Training set
        y_score_train = (
            model.predict_proba(X_train_scaled_full)[:, 1]
            if hasattr(model, "predict_proba")
            else model.decision_function(X_train_scaled_full)
        )
        thr_train = final_thresholds[model_name]
        y_pred_train = (y_score_train >= thr_train).astype(int)
        fpr, tpr, _ = roc_curve(y_train_full, y_score_train)
        auc_val = auc(fpr, tpr)
        acc = accuracy_score(y_train_full, y_pred_train)
        prec = precision_score(y_train_full, y_pred_train)
        rec = recall_score(y_train_full, y_pred_train)
        f1_val = f1_score(y_train_full, y_pred_train)
        axes[0].plot(fpr, tpr, lw=2,
                     label=f"{model_name} (AUC={auc_val:.3f}, Acc={acc:.2f}, F1={f1_val:.2f})")

        # Test set (reuses the threshold chosen on the validation fold)
        y_score_test = (
            model.predict_proba(X_test_scaled)[:, 1]
            if hasattr(model, "predict_proba")
            else model.decision_function(X_test_scaled)
        )
        y_pred_test = (y_score_test >= thr_train).astype(int)
        fpr_test, tpr_test, _ = roc_curve(y_test, y_score_test)
        auc_test_val = auc(fpr_test, tpr_test)
        axes[1].plot(fpr_test, tpr_test, lw=2,
                     label=f"{model_name} (AUC={auc_test_val:.3f}, Acc={accuracy_score(y_test, y_pred_test):.2f}, F1={f1_score(y_test, y_pred_test):.2f})")

    for ax, title in zip(axes, ["Training Set", "Test Set"]):
        ax.plot([0, 1], [0, 1], "k--", lw=1)  # chance diagonal
        ax.set_xlabel("False Positive Rate")
        ax.set_ylabel("True Positive Rate")
        ax.set_title(f"ROC - {title}")
        ax.legend(loc="lower right", fontsize=8)

    plt.tight_layout()
    plt.savefig(out_dir / "all_models.roc.png")
    plt.savefig(out_dir / "all_models.roc.pdf")
    plt.close()

    # =========================
    # 6. Confusion matrices (one figure per model and data split)
    # =========================
    for model_name, model in final_models.items():
        thr = final_thresholds[model_name]

        for dataset, X_data, y_data, cmap, suffix in [
            ("train", X_train_scaled_full, y_train_full, "Blues", "train"),
            ("test", X_test_scaled, y_test, "Greens", "test"),
        ]:
            y_score = (
                model.predict_proba(X_data)[:, 1]
                if hasattr(model, "predict_proba")
                else model.decision_function(X_data)
            )
            y_pred = (y_score >= thr).astype(int)
            cm = confusion_matrix(y_data, y_pred)
            plt.figure(figsize=(6, 5))
            sns.heatmap(cm, annot=True, fmt="d", cmap=cmap, xticklabels=class_names, yticklabels=class_names)
            plt.title(f"{model_name} - {dataset.capitalize()} Set")
            plt.xlabel("Predicted")
            plt.ylabel("Actual")
            plt.tight_layout()
            plt.savefig(out_dir / f"{model_name}_{suffix}_confusion_matrix.png")
            plt.savefig(out_dir / f"{model_name}_{suffix}_confusion_matrix.pdf")
            plt.close()

    # =========================
    # 7. Save per-model metrics and fitted parameters
    # =========================
    save_model_results(final_models, final_thresholds, X_train_full, y_train_full, X_test, y_test, scaler_full, out_dir)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="mRNA multi-model classifier")
    parser.add_argument("-i", "--input", required=True, help="TPM/FPKM expression matrix (genes x samples)")
    parser.add_argument("-g", "--group", required=True, help="Sample group file (Sample, Group)")
    parser.add_argument("-d", "--deg", required=True, help="DEG file for selecting top genes")
    parser.add_argument("-n", "--top", type=int, default=8, help="Top N DEGs to use")
    parser.add_argument("-o", "--outdir", required=True, help="Output directory")
    args = parser.parse_args()
    main(args)
