from common_import import *

import numpy as np
import pandas as pd

import lightgbm as lgb
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNet
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from xgboost import XGBRegressor


def evaluate_model(model, X_test, y_test):
    """Print and return test-set regression metrics for a fitted model.

    Parameters
    ----------
    model : any fitted estimator exposing ``predict``.
    X_test : feature matrix of the held-out split.
    y_test : true target values of the held-out split.

    Returns
    -------
    tuple[float, float]
        ``(mse, r2)``. Previously the metrics were print-only and
        discarded; returning them lets callers use the values
        programmatically while existing callers (which ignore the
        return) are unaffected.
    """
    y_pred = model.predict(X_test)
    mse = mean_squared_error(y_test, y_pred)
    r2 = r2_score(y_test, y_pred)
    print(f"Mean Squared Error: {mse:.4f}")
    print(f"R^2 Score: {r2:.4f}")
    return mse, r2


def cal_importance_XGBRegressor(descriptor_data, activity_data, evaluate=False):
    """Rank features with an XGBoost regressor.

    Trains a 100-tree XGBRegressor on an 80/20 split (seed 42), optionally
    prints test-set metrics, saves a prediction scatter plot, and returns a
    DataFrame with columns ``Feature`` / ``Importance`` (split counts).

    Parameters
    ----------
    descriptor_data : pandas.DataFrame of feature columns.
    activity_data : target values (e.g. pIC50).
    evaluate : if True, print MSE/R^2 on the held-out split.
    """
    # Features and target.
    X = descriptor_data
    y = activity_data

    # Hold out 20% of the rows for evaluation / plotting.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Train the XGBoost model.
    model = XGBRegressor(
        objective="reg:squarederror", n_estimators=100, random_state=42
    )
    model.fit(X_train, y_train)

    # Optionally report fit quality.
    if evaluate:
        evaluate_model(model, X_test, y_test)

    # get_score() omits features that were never used in any split, so its
    # dict can be shorter than X.columns. Reindex over every input column
    # (missing -> 0.0) so the result has one row per feature, matching the
    # other cal_importance_* helpers.
    scores = model.get_booster().get_score(importance_type="weight")
    importance_df = pd.DataFrame(
        {
            "Feature": list(X.columns),
            "Importance": [scores.get(str(col), 0.0) for col in X.columns],
        }
    )
    y_pred = model.predict(X_test)
    tool.plot_predictions(y_test, y_pred, "xgb预测.png")
    return importance_df


def cal_importance_RandomForestRegressor(
    descriptor_data, activity_data, evaluate=False
):
    """Rank features with a random-forest regressor.

    Fits a 100-tree RandomForestRegressor on an 80/20 split (seed 42),
    optionally prints test-set metrics, saves a prediction scatter plot,
    and returns a DataFrame with columns ``Feature`` / ``Importance``.
    """
    features, target = descriptor_data, activity_data

    # Hold out 20% of the rows for evaluation / plotting.
    train_X, test_X, train_y, test_y = train_test_split(
        features, target, test_size=0.2, random_state=42
    )

    # Fit the forest.
    forest = RandomForestRegressor(n_estimators=100, random_state=42)
    forest.fit(train_X, train_y)

    # Optionally report fit quality.
    if evaluate:
        evaluate_model(forest, test_X, test_y)

    # Impurity-based importances: one value per input column.
    ranking = pd.DataFrame(
        {"Feature": features.columns, "Importance": forest.feature_importances_}
    )

    tool.plot_predictions(test_y, forest.predict(test_X), "随机森林预测.png")
    return ranking


def cal_importance_LGBMRegressor(descriptor_data, activity_data, evaluate=False):
    """Rank features with a LightGBM regressor.

    Trains on an 80/20 split (seed 42), optionally prints test-set metrics,
    saves a prediction scatter plot, and returns a DataFrame with columns
    ``Feature`` / ``Importance`` based on per-feature split counts.
    """
    features, target = descriptor_data, activity_data

    # Hold out 20% of the rows for evaluation / plotting.
    train_X, test_X, train_y, test_y = train_test_split(
        features, target, test_size=0.2, random_state=42
    )

    # NOTE(review): random_state=40 differs from the 42 used by every other
    # model in this module — presumably a deliberate tuning choice; confirm.
    booster = lgb.LGBMRegressor(
        objective="regression",
        n_estimators=100,
        random_state=40,
        learning_rate=0.05,
        max_depth=15,
    )
    booster.fit(train_X, train_y)

    # Optionally report fit quality.
    if evaluate:
        evaluate_model(booster, test_X, test_y)

    # "split" importance = number of times each feature is used in a split.
    split_counts = booster.booster_.feature_importance(importance_type="split")
    ranking = pd.DataFrame({"Feature": features.columns, "Importance": split_counts})

    tool.plot_predictions(test_y, booster.predict(test_X), "lgbm预测.png")
    return ranking


def cal_importance_ElasticNet(descriptor_data, activity_data, evaluate=False):
    """Rank features with an ElasticNet linear model.

    Standardizes the features so coefficient magnitudes are comparable,
    trains on an 80/20 split (seed 42), optionally prints test-set metrics,
    saves a prediction scatter plot, and returns a DataFrame with columns
    ``Feature`` / ``Importance`` where importance is |coefficient|.
    """
    features, target = descriptor_data, activity_data

    # Zero mean / unit variance so regularization treats features equally.
    standardized = StandardScaler().fit_transform(features)

    # Hold out 20% of the rows for evaluation / plotting.
    train_X, test_X, train_y, test_y = train_test_split(
        standardized, target, test_size=0.2, random_state=42
    )

    # Fit the ElasticNet model.
    net = ElasticNet(alpha=1.0, l1_ratio=0.5, random_state=42)
    net.fit(train_X, train_y)

    # Optionally report fit quality.
    if evaluate:
        evaluate_model(net, test_X, test_y)

    # Coefficient magnitude serves as the importance score.
    ranking = pd.DataFrame(
        {"Feature": features.columns, "Importance": np.abs(net.coef_)}
    )

    tool.plot_predictions(test_y, net.predict(test_X), "弹性网络预测.png")
    return ranking


from sklearn.linear_model import LinearRegression


def cal_importance_LinearRegression(descriptor_data, activity_data, evaluate=False):
    """Rank features with ordinary least-squares linear regression.

    Trains on an 80/20 split (seed 42), optionally prints test-set metrics,
    saves a prediction scatter plot, and returns a DataFrame with columns
    ``Feature`` / ``Importance`` where importance is |coefficient|.

    NOTE(review): features are not standardized here, so coefficient
    magnitudes are only comparable if the inputs share a scale — consider
    standardizing as in cal_importance_ElasticNet; confirm with callers.
    """
    # Features and target.
    X = descriptor_data
    y = activity_data

    # Hold out 20% of the rows for evaluation / plotting.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Fit the multiple linear regression model.
    model = LinearRegression()
    model.fit(X_train, y_train)

    # Optionally report fit quality.
    if evaluate:
        evaluate_model(model, X_test, y_test)

    # Use |coefficient| so importance is a magnitude, consistent with
    # cal_importance_ElasticNet. Raw signed coefficients previously made
    # negative-effect features cancel out when summed or compared.
    importance = np.abs(model.coef_)
    importance_df = pd.DataFrame({"Feature": X.columns, "Importance": importance})

    # Predict on the held-out split.
    y_pred = model.predict(X_test)

    # Save the prediction scatter plot.
    tool.plot_predictions(y_test, y_pred, "多重线性回归预测.png")

    return importance_df


from sklearn.svm import SVR


def cal_importance_SVR(descriptor_data, activity_data, evaluate=False):
    """Rank features with a linear-kernel support-vector regressor.

    Trains on an 80/20 split (seed 42), optionally prints test-set metrics,
    saves a prediction scatter plot, and returns a DataFrame with columns
    ``Feature`` / ``Importance`` where importance is |coefficient|.
    Coefficient-based importance is only meaningful for the linear kernel.
    """
    # Features and target.
    X = descriptor_data
    y = activity_data

    # Hold out 20% of the rows for evaluation / plotting.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    # Fit the SVR; coef_ exists only for kernel="linear".
    model = SVR(kernel="linear")
    model.fit(X_train, y_train)

    # Optionally report fit quality.
    if evaluate:
        evaluate_model(model, X_test, y_test)

    # coef_ is a 2-D (1, n_features) array; flatten it and take absolute
    # values so importance is a magnitude, consistent with
    # cal_importance_ElasticNet (raw signed coefficients previously made
    # negative-effect features look unimportant in comparisons).
    importance = np.abs(model.coef_).flatten()
    importance_df = pd.DataFrame({"Feature": X.columns, "Importance": importance})

    # Predict on the held-out split.
    y_pred = model.predict(X_test)

    # Save the prediction scatter plot.
    tool.plot_predictions(y_test, y_pred, "支持向量机预测.png")

    return importance_df


if __name__ == "__main__":
    # Load the training descriptors and the measured ER activity.
    molecular_descriptor = pd.read_csv("data/Molecular_Descriptor_training.csv")
    era_activity = pd.read_csv("data/ER_activity_training.csv")

    # Drop the compound identifier; keep only numeric descriptor columns.
    X = molecular_descriptor.drop(columns=["SMILES"])
    y = era_activity["pIC50"]

    # Restrict to the pre-selected 20-feature subset from constants.
    filter_X = X[constants.feature_20]

    # Run the SVR-based importance calculation with evaluation output.
    cal_importance_SVR(filter_X, y, evaluate=True)
