from common_import import *
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from common_import import *
import lightgbm as lgb
from sklearn.preprocessing import StandardScaler
from sklearn.decomposition import PCA
from sklearn.inspection import permutation_importance
from sklearn.neural_network import MLPClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import shap
from lightgbm import LGBMClassifier
from sklearn.metrics import accuracy_score


def cal_importance_LGBMRegressor(descriptor_data, activity_data):
    """
    Fit a LightGBM regressor on the descriptor matrix and return per-feature
    split-based importance scores.

    Parameters
    ----------
    descriptor_data : pd.DataFrame
        Molecular descriptor features; column names become the "Feature" column.
    activity_data : pd.Series
        Regression target (e.g. pIC50).

    Returns
    -------
    pd.DataFrame
        Columns ["Feature", "Importance1"], one row per input column, in the
        same order as descriptor_data's columns.
    """
    features, target = descriptor_data, activity_data

    # Hold out 20% so the fit quality can be eyeballed via the plot below.
    X_train, X_test, y_train, y_test = train_test_split(
        features, target, test_size=0.2, random_state=42
    )

    regressor = lgb.LGBMRegressor(
        objective="regression",
        n_estimators=100,
        random_state=40,
        learning_rate=0.05,
        max_depth=15,
        verbose=-1,
    )
    regressor.fit(X_train, y_train)

    # "split" importance: how many times each feature is used in a tree split.
    split_counts = regressor.booster_.feature_importance(importance_type="split")
    importance_df = pd.DataFrame(
        {"Feature": features.columns, "Importance1": split_counts}
    )

    # Plot held-out predictions as a quick visual sanity check.
    predictions = regressor.predict(X_test)
    tool.plot_predictions(y_test, predictions, "lgbm预测.png")
    return importance_df


def train_lgbm(X, y, n_components=60):
    """
    Standardize features, train one LGBMClassifier per target column, print
    the mean validation accuracy, and return each feature's maximum
    importance across all targets.

    Parameters
    ----------
    X : pd.DataFrame
        Feature matrix; its column order is preserved in the returned frame.
    y : pd.DataFrame
        One binary target per column (originally Caco-2/CYP3A4/hERG/HOB/MN;
        generalized to whatever columns y carries).
    n_components : int, optional
        Unused; retained only for backward compatibility with existing
        callers that pass it positionally.

    Returns
    -------
    pd.DataFrame
        Columns ["feature", "importance"], one row per feature, in the SAME
        order as X's columns, so callers may align positionally (via
        `.values`) with other importance frames.
    """
    # Standardize features before training.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)

    # Split into training and validation sets.
    X_train, X_val, y_train, y_val = train_test_split(
        X_scaled, y, test_size=0.2, random_state=42
    )

    # Generic positional names; the reindex below restores this exact order.
    feature_names = [f"feature_{i}" for i in range(X.shape[1])]
    targets = list(y.columns)

    total_accuracy = 0.0
    importance_frames = []
    for target in targets:
        model = LGBMClassifier(
            n_estimators=100,
            learning_rate=0.1,
            random_state=42,
            force_col_wise=True,
            verbose=-1,  # suppress all LightGBM logging
        )
        model.fit(X_train, y_train[target])

        # Accumulate validation accuracy for the overall average.
        total_accuracy += accuracy_score(y_val[target], model.predict(X_val))

        # Collect this target's feature importances, tagged by target name.
        importance_frames.append(
            pd.DataFrame(
                {
                    "feature": feature_names,
                    "importance": model.feature_importances_,
                    "target": target,
                }
            )
        )

    print(f"Overall accuracy: {total_accuracy / len(targets):.4f}")

    feature_importance_df = pd.concat(importance_frames, axis=0)

    # BUG FIX: groupby sorts its keys lexicographically (feature_10 sorts
    # before feature_2), which scrambles row order relative to X's columns.
    # Reindexing by feature_names restores the original column order so
    # positional alignment by callers remains correct.
    feature_importance_max = (
        feature_importance_df.groupby("feature")["importance"]
        .max()
        .reindex(feature_names)
        .reset_index()
    )
    return feature_importance_max


from sklearn.preprocessing import MinMaxScaler


def normalize_and_combine_importance(importance_df):
    """
    Min-max normalize the two importance columns, combine them by taking the
    row-wise maximum, and sort by the combined score in descending order.

    Parameters
    ----------
    importance_df : pd.DataFrame
        Must contain "Importance1" and "Importance2" columns. Modified in
        place (columns overwritten, rows reordered); the same object is
        returned.

    Returns
    -------
    pd.DataFrame
        The input frame with both importance columns scaled to [0, 1], a new
        "importance_combine" column, and rows sorted by it descending.
    """
    cols = ["Importance1", "Importance2"]

    # Vectorized min-max scaling, equivalent to sklearn's MinMaxScaler:
    # a zero range (constant column) is treated as 1 so the column maps to
    # all-zeros instead of NaN, matching MinMaxScaler's behavior.
    col_min = importance_df[cols].min()
    col_range = (importance_df[cols].max() - col_min).replace(0, 1)
    importance_df[cols] = (importance_df[cols] - col_min) / col_range

    # Combined score: the larger of the two normalized importances per row.
    importance_df["importance_combine"] = importance_df[cols].max(axis=1)

    # Rank features from most to least important.
    importance_df.sort_values(by="importance_combine", ascending=False, inplace=True)
    return importance_df


if __name__ == "__main__":
    molecular_descriptor = pd.read_csv("data/Molecular_Descriptor_training.csv")
    era_activity = pd.read_csv("data/ER_activity_training.csv")

    # Drop the compound identifier; remaining columns are numeric descriptors.
    X = molecular_descriptor.drop(columns=["SMILES"])
    y = era_activity["pIC50"]

    # Importance source 1: regression on pIC50 activity.
    importance1 = cal_importance_LGBMRegressor(X, y)
    print(importance1)

    # Importance source 2: classification on the five ADMET endpoints
    # (reuses the same descriptor matrix X).
    ADMET = pd.read_csv("data/ADMET_training.csv")
    y_admet = ADMET[["Caco-2", "CYP3A4", "hERG", "HOB", "MN"]]
    importance2 = train_lgbm(X, y_admet, 70)

    # NOTE(review): positional assignment assumes importance2's rows are in
    # the same order as X's columns — confirm train_lgbm preserves that
    # order, otherwise the two importance columns are misaligned.
    importance1["Importance2"] = importance2["importance"].values

    result = normalize_and_combine_importance(importance1)
    result.to_csv("data/problem4_importance.csv", index=False)

    # Top 295 features by combined importance (variable previously named
    # "top_100_features", which contradicted the head(295) slice).
    top_features = result["Feature"].head(295).tolist()
    print(top_features)
