import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
from lightgbm import LGBMClassifier
from xgboost import XGBClassifier
from sklearn.neural_network import MLPClassifier
from common_import import *
from sklearn.metrics import roc_curve, auc
from sklearn.neighbors import KNeighborsClassifier


def plot_combined_roc_curve(y_true_dict, y_pred_proba_dict):
    """Draw the ROC curves of all target models on one figure.

    Both arguments map a target name to, respectively, the true binary
    labels and the predicted positive-class probabilities for that
    target.  The mean AUC over all targets is printed to stdout.
    """
    plt.figure(figsize=(8, 6))

    auc_values = []
    for target, y_true in y_true_dict.items():
        fpr, tpr, _ = roc_curve(y_true, y_pred_proba_dict[target])
        roc_auc = auc(fpr, tpr)
        auc_values.append(roc_auc)
        plt.plot(fpr, tpr, lw=2, label=f"{target} (AUC = {roc_auc:.2f})")

    # Mean AUC over all targets.
    average_auc = sum(auc_values) / len(auc_values)
    print(f"auc:{average_auc:.4f}")

    # Diagonal reference line for a random classifier.
    plt.plot([0, 1], [0, 1], color="navy", lw=2, linestyle="--")
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel("假阳性率")
    plt.ylabel("真阳性率")
    plt.title("")
    plt.legend(loc="lower right")
    tool.show_or_print("roc曲线图.png")


def train_RandomForest(X, y, n_components=120):
    """
    Preprocess the data, train one random-forest classifier per ADMET
    target, and report mean validation accuracy plus a combined ROC plot.

    Parameters
    ----------
    X : DataFrame of molecular-descriptor features.
    y : DataFrame with one binary column per target name.
    n_components : number of principal components kept by PCA.

    Returns
    -------
    (models, scaler, pca) where ``models`` maps target name to a fitted
    RandomForestClassifier, and ``scaler``/``pca`` are fitted on the
    training split only (apply both to new data before predicting).
    """
    targets = ["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]

    # Split FIRST so the scaler/PCA never see validation rows.
    # (Fitting them on the full set before splitting leaks validation
    # information into the preprocessing and inflates the metrics.)
    X_train_raw, X_val_raw, y_train, y_val = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Standardize features (fit on the training split only).
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train_raw)
    X_val_scaled = scaler.transform(X_val_raw)

    # PCA to reduce multicollinearity (fit on the training split only).
    pca = PCA(n_components=n_components)
    X_train = pca.fit_transform(X_train_scaled)
    X_val = pca.transform(X_val_scaled)

    # Train one independent random forest per target.
    rf_models = {}
    total_accuracy = 0.0
    y_true_dict = {}
    y_pred_proba_dict = {}
    for target in targets:
        rf = RandomForestClassifier(n_estimators=100, random_state=42)
        rf.fit(X_train, y_train[target])
        rf_models[target] = rf

        # Evaluate on the held-out validation split.
        y_pred = rf.predict(X_val)
        total_accuracy += accuracy_score(y_val[target], y_pred)
        # Positive-class probability, needed for the ROC curves.
        y_true_dict[target] = y_val[target]
        y_pred_proba_dict[target] = rf.predict_proba(X_val)[:, 1]

    print(f"acc: {total_accuracy / len(targets):.4f}")
    plot_combined_roc_curve(y_true_dict, y_pred_proba_dict)
    return rf_models, scaler, pca


def train_SVM(X, y, n_components=120):
    """
    Preprocess the data, train one RBF-kernel SVM per ADMET target, and
    report mean validation accuracy plus a combined ROC plot.

    Parameters
    ----------
    X : DataFrame of molecular-descriptor features.
    y : DataFrame with one binary column per target name.
    n_components : number of principal components kept by PCA.

    Returns
    -------
    (models, scaler, pca) where ``models`` maps target name to a fitted
    SVC, and ``scaler``/``pca`` are fitted on the training split only
    (apply both to new data before predicting).
    """
    targets = ["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]

    # Split FIRST so the scaler/PCA never see validation rows
    # (fitting them before the split leaks validation information).
    X_train_raw, X_val_raw, y_train, y_val = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Standardize features (fit on the training split only).
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train_raw)
    X_val_scaled = scaler.transform(X_val_raw)

    # PCA to reduce multicollinearity (fit on the training split only).
    pca = PCA(n_components=n_components)
    X_train = pca.fit_transform(X_train_scaled)
    X_val = pca.transform(X_val_scaled)

    # Train one independent SVM per target.
    svm_models = {}
    total_accuracy = 0.0
    y_true_dict = {}
    y_pred_proba_dict = {}
    for target in targets:
        # probability=True is required for predict_proba (ROC input).
        svm = SVC(kernel="rbf", C=1.0, random_state=42, probability=True)
        svm.fit(X_train, y_train[target])
        svm_models[target] = svm

        # Evaluate on the held-out validation split.
        y_pred = svm.predict(X_val)
        total_accuracy += accuracy_score(y_val[target], y_pred)
        # Positive-class probability, needed for the ROC curves.
        y_true_dict[target] = y_val[target]
        y_pred_proba_dict[target] = svm.predict_proba(X_val)[:, 1]

    print(f"acc: {total_accuracy / len(targets):.4f}")
    plot_combined_roc_curve(y_true_dict, y_pred_proba_dict)
    return svm_models, scaler, pca


def train_lgbm(X, y, n_components=60):
    """
    Preprocess the data, train one LightGBM classifier per ADMET target,
    and report mean validation accuracy plus a combined ROC plot.

    Parameters
    ----------
    X : DataFrame of molecular-descriptor features.
    y : DataFrame with one binary column per target name.
    n_components : number of principal components kept by PCA.

    Returns
    -------
    (models, scaler, pca) where ``models`` maps target name to a fitted
    LGBMClassifier, and ``scaler``/``pca`` are fitted on the training
    split only (apply both to new data before predicting).
    """
    targets = ["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]

    # Split FIRST so the scaler/PCA never see validation rows
    # (fitting them before the split leaks validation information).
    X_train_raw, X_val_raw, y_train, y_val = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Standardize features (fit on the training split only).
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train_raw)
    X_val_scaled = scaler.transform(X_val_raw)

    # PCA to reduce multicollinearity (fit on the training split only).
    pca = PCA(n_components=n_components)
    X_train = pca.fit_transform(X_train_scaled)
    X_val = pca.transform(X_val_scaled)

    # Train one independent LightGBM model per target.
    lgbm_models = {}
    total_accuracy = 0.0
    y_true_dict = {}
    y_pred_proba_dict = {}
    for target in targets:
        lgbm = LGBMClassifier(n_estimators=100, learning_rate=0.1, random_state=42)
        lgbm.fit(X_train, y_train[target])
        lgbm_models[target] = lgbm

        # Evaluate on the held-out validation split.
        y_pred = lgbm.predict(X_val)
        total_accuracy += accuracy_score(y_val[target], y_pred)
        # Positive-class probability, needed for the ROC curves.
        y_true_dict[target] = y_val[target]
        y_pred_proba_dict[target] = lgbm.predict_proba(X_val)[:, 1]

    print(f"acc: {total_accuracy / len(targets):.4f}")
    plot_combined_roc_curve(y_true_dict, y_pred_proba_dict)
    return lgbm_models, scaler, pca


def train_xgb(X, y, n_components=60):
    """
    Preprocess the data, train one XGBoost classifier per ADMET target,
    and report mean validation accuracy plus a combined ROC plot.

    Parameters
    ----------
    X : DataFrame of molecular-descriptor features.
    y : DataFrame with one binary column per target name.
    n_components : number of principal components kept by PCA.

    Returns
    -------
    (models, scaler, pca) where ``models`` maps target name to a fitted
    XGBClassifier, and ``scaler``/``pca`` are fitted on the training
    split only (apply both to new data before predicting).
    """
    targets = ["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]

    # Split FIRST so the scaler/PCA never see validation rows
    # (fitting them before the split leaks validation information).
    X_train_raw, X_val_raw, y_train, y_val = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Standardize features (fit on the training split only).
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train_raw)
    X_val_scaled = scaler.transform(X_val_raw)

    # PCA to reduce multicollinearity (fit on the training split only).
    pca = PCA(n_components=n_components)
    X_train = pca.fit_transform(X_train_scaled)
    X_val = pca.transform(X_val_scaled)

    # Train one independent XGBoost model per target.
    xgb_models = {}
    total_accuracy = 0.0
    y_true_dict = {}
    y_pred_proba_dict = {}
    for target in targets:
        xgb = XGBClassifier(
            n_estimators=100, learning_rate=0.1, eval_metric="auc", random_state=42
        )
        xgb.fit(X_train, y_train[target])
        xgb_models[target] = xgb

        # Evaluate on the held-out validation split.
        y_pred = xgb.predict(X_val)
        total_accuracy += accuracy_score(y_val[target], y_pred)
        # Positive-class probability, needed for the ROC curves.
        y_true_dict[target] = y_val[target]
        y_pred_proba_dict[target] = xgb.predict_proba(X_val)[:, 1]

    print(f"acc: {total_accuracy / len(targets):.4f}")
    plot_combined_roc_curve(y_true_dict, y_pred_proba_dict)
    return xgb_models, scaler, pca


def plot_explained_variance(pca):
    """Plot the cumulative explained-variance ratio of a fitted PCA.

    Prints the total fraction of variance captured by all kept
    components, then renders (or saves) the cumulative curve.
    """
    cumulative = np.cumsum(pca.explained_variance_ratio_)
    # Total variance ratio retained by the chosen components.
    print(cumulative[-1])

    plt.figure(figsize=(8, 6))
    component_counts = range(1, len(cumulative) + 1)
    plt.plot(
        component_counts,
        cumulative,
        marker="o",
        linestyle="-",
        color="b",
        label="Cumulative explained variance",
    )
    plt.xlabel("主成分数量")
    plt.ylabel("累积解释方差比例")
    plt.title("")
    plt.legend(loc="best")
    plt.grid(True)
    tool.show_or_print("PCA；累计方差显示")


def train_mlp(X, y, n_components=70):
    """
    Preprocess the data, train one multilayer perceptron (MLP) per ADMET
    target, and report per-target and mean validation accuracy.

    Also plots the cumulative explained variance of the fitted PCA.
    (Note: PCA is always applied here, unconditionally.)

    Parameters
    ----------
    X : DataFrame of molecular-descriptor features.
    y : DataFrame with one binary column per target name.
    n_components : number of principal components kept by PCA.

    Returns
    -------
    (models, scaler, pca) where ``models`` maps target name to a fitted
    MLPClassifier, and ``scaler``/``pca`` are fitted on the training
    split only (apply both to new data before predicting).
    """
    targets = ["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]

    # Split FIRST so the scaler/PCA never see validation rows
    # (fitting them before the split leaks validation information).
    X_train_raw, X_val_raw, y_train, y_val = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Standardize features (fit on the training split only).
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train_raw)
    X_val_scaled = scaler.transform(X_val_raw)

    # PCA to reduce multicollinearity (fit on the training split only).
    pca = PCA(n_components=n_components)
    X_train = pca.fit_transform(X_train_scaled)
    X_val = pca.transform(X_val_scaled)
    plot_explained_variance(pca)

    # Train one independent MLP per target.
    mlp_models = {}
    total_accuracy = 0.0
    y_true_dict = {}
    y_pred_proba_dict = {}
    for target in targets:
        mlp = MLPClassifier(hidden_layer_sizes=(45,), max_iter=1000, random_state=42)
        mlp.fit(X_train, y_train[target])
        mlp_models[target] = mlp

        # Evaluate on the held-out validation split.
        y_pred = mlp.predict(X_val)
        accuracy = accuracy_score(y_val[target], y_pred)
        total_accuracy += accuracy
        # Positive-class probability, kept for optional ROC plotting.
        y_true_dict[target] = y_val[target]
        y_pred_proba_dict[target] = mlp.predict_proba(X_val)[:, 1]
        print(f"{target}:  {accuracy:.4f}")
        print(f"Classification report for {target}:")
        print(classification_report(y_val[target], y_pred))

    print(f"acc: {total_accuracy / len(targets):.4f}")
    # plot_combined_roc_curve(y_true_dict, y_pred_proba_dict)
    return mlp_models, scaler, pca


def train_knn(X, y, n_components=70, n_neighbors=5):
    """
    Preprocess the data, train one k-nearest-neighbors classifier per
    ADMET target, and report mean validation accuracy plus a combined
    ROC plot.

    Parameters
    ----------
    X : DataFrame of molecular-descriptor features.
    y : DataFrame with one binary column per target name.
    n_components : number of principal components kept by PCA.
    n_neighbors : number of neighbors used by each KNN classifier.

    Returns
    -------
    (models, scaler, pca) where ``models`` maps target name to a fitted
    KNeighborsClassifier, and ``scaler``/``pca`` are fitted on the
    training split only (apply both to new data before predicting).
    """
    targets = ["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]

    # Split FIRST so the scaler/PCA never see validation rows
    # (fitting them before the split leaks validation information).
    X_train_raw, X_val_raw, y_train, y_val = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Standardize features (fit on the training split only).
    # Scaling matters for KNN because it is distance-based.
    scaler = StandardScaler()
    X_train_scaled = scaler.fit_transform(X_train_raw)
    X_val_scaled = scaler.transform(X_val_raw)

    # PCA to reduce multicollinearity (fit on the training split only).
    pca = PCA(n_components=n_components)
    X_train = pca.fit_transform(X_train_scaled)
    X_val = pca.transform(X_val_scaled)

    # Train one independent KNN model per target.
    knn_models = {}
    total_accuracy = 0.0
    y_true_dict = {}
    y_pred_proba_dict = {}
    for target in targets:
        knn = KNeighborsClassifier(n_neighbors=n_neighbors, metric="minkowski")
        knn.fit(X_train, y_train[target])
        knn_models[target] = knn

        # Evaluate on the held-out validation split.
        y_pred = knn.predict(X_val)
        total_accuracy += accuracy_score(y_val[target], y_pred)
        # Positive-class probability, needed for the ROC curves.
        y_true_dict[target] = y_val[target]
        y_pred_proba_dict[target] = knn.predict_proba(X_val)[:, 1]

    print(f"Average accuracy across all five models: {total_accuracy / len(targets):.4f}")
    plot_combined_roc_curve(y_true_dict, y_pred_proba_dict)
    return knn_models, scaler, pca


if __name__ == "__main__":
    # Load the molecular-descriptor features and the ADMET labels
    # from the training CSVs.
    molecular_descriptor = pd.read_csv("data/Molecular_Descriptor_training.csv")
    ADMET = pd.read_csv("data/ADMET_training.csv")
    # Features: all descriptor columns except the SMILES string.
    X = molecular_descriptor.drop(columns=["SMILES"])
    # Labels: the five binary ADMET targets.
    y = ADMET[["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]]
    # Data preprocessing, model training and evaluation.
    # Uncomment exactly one trainer; the MLP is currently active.
    # rf_models, scaler, pca = train_RandomForest(X, y)
    # rf_models, scaler, pca = train_SVM(X, y)
    # rf_models, scaler, pca = train_lgbm(X, y)
    # rf_models, scaler, pca = train_xgb(X, y)
    rf_models, scaler, pca = train_mlp(X, y)
    # rf_models, scaler, pca = train_knn(X, y)
