import pandas as pd
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import train_test_split
from common_import import *
from sklearn.metrics import mean_squared_error, r2_score
from sklearn.ensemble import RandomForestRegressor
from sklearn.linear_model import ElasticNetCV
from sklearn.preprocessing import StandardScaler
import shap


def determine_feature_type(feature, threshold=10):
    """Classify a pandas Series as a discrete or continuous feature.

    Parameters
    ----------
    feature : pd.Series
        The feature column to classify.
    threshold : int, default 10
        Maximum number of unique values for an integer feature to still
        count as discrete ("离散型").

    Returns
    -------
    str
        "小数连续型" (float continuous), "离散型" (discrete), or
        "整数连续型" (integer continuous).
    """
    # Any float dtype is continuous. The previous check `feature.dtype == float`
    # only matched float64, so float32 columns fell through and were
    # misclassified; is_float_dtype covers all float widths. The element-wise
    # scan still catches Python floats hidden inside an object-dtype column.
    if pd.api.types.is_float_dtype(feature) or feature.apply(
        lambda x: isinstance(x, float)
    ).any():
        return "小数连续型"

    # Integer-valued feature: few unique values => discrete, otherwise
    # treat it as an integer-valued continuous feature.
    if feature.nunique() <= threshold:
        return "离散型"
    return "整数连续型"


def evaluate_model(model, X_test, y_test):
    """Print and return regression metrics for a fitted model.

    Predicts on ``X_test`` and reports MSE, RMSE and R² against ``y_test``.

    Returns
    -------
    tuple of (mse, rmse, r2)
    """
    predictions = model.predict(X_test)

    mse = mean_squared_error(y_test, predictions)
    rmse = mse ** 0.5
    r2 = r2_score(y_test, predictions)

    print("均方误差 (MSE):", mse)
    print("均方根误差 (RMSE):", rmse)
    print("决定系数 (R²):", r2)

    return mse, rmse, r2


def select_important_features_XGBRegressor(descriptor_data, activity_data):
    """Rank descriptors with an XGBoost regressor and return the top 20.

    Fits an XGBRegressor on an 80/20 train/test split, prints fit metrics
    via ``evaluate_model``, then ranks features by "weight" importance.

    Parameters
    ----------
    descriptor_data : pd.DataFrame
        Feature matrix (one column per molecular descriptor).
    activity_data : pd.Series
        Target activity values aligned with ``descriptor_data``.

    Returns
    -------
    pd.DataFrame
        Up to 20 rows with columns ["Feature", "Importance"], sorted by
        importance in descending order.
    """
    X = descriptor_data
    y = activity_data

    # Hold out 20% of the data to quantify fit quality.
    X_train, X_test, y_train, y_test = train_test_split(
        X, y, test_size=0.2, random_state=42
    )

    model = XGBRegressor(
        objective="reg:squarederror", n_estimators=100, random_state=42
    )
    model.fit(X_train, y_train)

    evaluate_model(model, X_test, y_test)

    # "weight" counts how often each feature is used in a tree split;
    # features never used by any tree are absent from this dict entirely,
    # so fewer than 20 rows may be returned.
    importance = model.get_booster().get_score(importance_type="weight")
    importance_df = pd.DataFrame(importance.items(), columns=["Feature", "Importance"])

    importance_df = importance_df.sort_values(by="Importance", ascending=False)
    # NOTE: the original had a second, unreachable `return` after this one;
    # it was dead code and has been removed.
    return importance_df.head(20)


def select_important_features_RandomForestRegressor(descriptor_data, activity_data):
    """Rank all descriptors with a random-forest regressor.

    Fits a RandomForestRegressor on an 80/20 train/test split, prints fit
    metrics via ``evaluate_model``, and returns the full importance ranking.

    Parameters
    ----------
    descriptor_data : pd.DataFrame
        Feature matrix (one column per molecular descriptor).
    activity_data : pd.Series
        Target activity values aligned with ``descriptor_data``.

    Returns
    -------
    pd.DataFrame
        All features with columns ["Feature", "Importance"], sorted by
        importance in descending order.
    """
    features = descriptor_data
    target = activity_data

    # Hold out 20% of the data to quantify fit quality.
    features_train, features_test, target_train, target_test = train_test_split(
        features, target, test_size=0.2, random_state=42
    )

    forest = RandomForestRegressor(n_estimators=100, random_state=42)
    forest.fit(features_train, target_train)

    evaluate_model(forest, features_test, target_test)

    # Impurity-based importances, one score per input column.
    ranking = pd.DataFrame(
        {"Feature": features.columns, "Importance": forest.feature_importances_}
    )
    ranking = ranking.sort_values(by="Importance", ascending=False)

    return ranking


import lightgbm as lgb


def select_important_features_LGBMRegressor(descriptor_data, activity_data):
    """Rank descriptors with a LightGBM regressor and return the top 20.

    Fits an LGBMRegressor on an 80/20 train/test split, prints fit metrics
    via ``evaluate_model``, then ranks features by "split" importance.

    Parameters
    ----------
    descriptor_data : pd.DataFrame
        Feature matrix (one column per molecular descriptor).
    activity_data : pd.Series
        Target activity values aligned with ``descriptor_data``.

    Returns
    -------
    pd.DataFrame
        Top 20 features with columns ["Feature", "Importance"], sorted by
        importance in descending order.
    """
    features = descriptor_data
    target = activity_data

    # Hold out 20% of the data to quantify fit quality.
    features_train, features_test, target_train, target_test = train_test_split(
        features, target, test_size=0.2, random_state=42
    )

    booster_model = lgb.LGBMRegressor(
        objective="regression", n_estimators=100, random_state=42
    )
    booster_model.fit(features_train, target_train)

    evaluate_model(booster_model, features_test, target_test)

    # "split" importance = number of times each feature is used in a split.
    split_counts = booster_model.booster_.feature_importance(importance_type="split")
    ranking = pd.DataFrame({"Feature": features.columns, "Importance": split_counts})
    ranking = ranking.sort_values(by="Importance", ascending=False)

    return ranking.head(20)


import myfilter

if __name__ == "__main__":
    # 示例调用（假设数据已经被加载到dataframes中）
    molecular_descriptor = pd.read_csv("data/Molecular_Descriptor_training.csv")
    era_activity = pd.read_csv("data/ER_activity_training.csv")

    # 移除不必要的列（例如化合物ID）
    X = molecular_descriptor.drop(columns=["SMILES"])
    y = era_activity["pIC50"]

    filter_X = myfilter.advanced_filter_features(X, y)
    top_20_features = select_important_features_XGBRegressor(filter_X, y)
    # feature_types = []

    # for feature in top_20_features["Feature"]:
    #     feature_type = determine_feature_type(X[feature], 10)
    #     feature_types.append(feature_type)

    # top_20_features["Feature Type"] = feature_types

    # print("前20个对生物活性影响最显著的分子描述符及其重要性评分及类型：")
    # print(top_20_features)
