import sys
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.ticker as mick
import datetime

from sklearn.tree import DecisionTreeClassifier

# Directory containing the current script
current_dir = os.path.dirname(os.path.abspath(__file__))
# Project root (adjust to the actual layout: may be current_dir or its parent)
project_root = os.path.dirname(current_dir)  # if utils lives in the parent directory
# or: project_root = current_dir  # if utils sits next to this script
sys.path.insert(0, project_root)
import pandas as pd
import numpy as np
from sklearn.model_selection import StratifiedKFold, cross_val_score
from sklearn.preprocessing import StandardScaler, OneHotEncoder
from sklearn.compose import ColumnTransformer
from sklearn.pipeline import Pipeline
from sklearn.metrics import roc_auc_score
import lightgbm as lgb


# from utils.common import data_preprocessing  # local package import

from xgboost import XGBRegressor, XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, root_mean_squared_error
from sklearn.metrics import confusion_matrix,roc_auc_score, roc_curve, auc

import joblib

# Global matplotlib defaults: SimHei so CJK labels render, 15 pt base font.
plt.rcParams.update({'font.family': 'SimHei', 'font.size': 15})


def DecisionTreeClassifier_决策树(feature_list):
    """Grid-search a DecisionTreeClassifier on the processed training data.

    Parameters:
        feature_list (list): feature column names to train on.

    Returns:
        GridSearchCV: the fitted grid-search object (best_estimator_,
        best_params_, best_score_ available to the caller).
    """
    # Same data file the other models in this script use; the previous
    # '../' path pointed at a directory and could never be read.
    df = pd.read_csv('../../data/processed/train_final.csv')
    X = df[feature_list]
    y = df['Attrition']

    estimator = DecisionTreeClassifier(random_state=42)
    param_grid = {
        # Impurity criterion: "gini" (Gini index) or "entropy" (information gain)
        'criterion': ['gini', 'entropy'],
        # Split strategy: "best" (optimal split) or "random" (random split)
        'splitter': ['best', 'random'],
        # Maximum tree depth; limits complexity to curb overfitting
        'max_depth': [None, 3, 5, 7, 10],
        # Minimum samples required to split a node (int = count, float = fraction)
        'min_samples_split': [2, 5, 10],
        # Minimum samples required at a leaf node (same int/float semantics)
        'min_samples_leaf': [1, 2, 4],
        # Max features considered per split: int, float, "sqrt" or "log2"
        'max_features': [None, 'sqrt', 'log2', 5, 10]
    }

    # AUC scoring to stay consistent with XGB_model's grid search.
    grid_search = GridSearchCV(estimator, param_grid, cv=4,
                               scoring='roc_auc', n_jobs=-1)
    grid_search.fit(X, y)

    print(f"DecisionTree 最佳参数: {grid_search.best_params_}")
    print(f"DecisionTree 最佳AUC得分: {grid_search.best_score_}")
    return grid_search

def model_LightGBM():
    """Train a LightGBM attrition classifier and persist it to ./light.pkl.

    Steps: load the processed training CSV, scale numeric / one-hot encode
    categorical columns, rank features with a baseline LightGBM's built-in
    importances, keep the Top-20, report 5-fold CV AUC, then refit on the
    full data and save the model with joblib.
    """
    # 1. Load the data
    df = pd.read_csv('../../data/processed/train_final.csv')

    # 2. Basic preprocessing: split target from features
    target = 'Attrition'
    y = df[target]
    X = df.drop(columns=[target])

    # Column type split drives the ColumnTransformer below.
    num_cols = X.select_dtypes(include=['int64', 'float64']).columns
    cat_cols = X.select_dtypes(include=['object', 'category', 'bool']).columns

    preprocessor = ColumnTransformer(
        transformers=[
            ('num', StandardScaler(), num_cols),
            ('cat', OneHotEncoder(handle_unknown='ignore'), cat_cols)
        ])

    # 3. Feature selection – use a baseline LightGBM's feature_importances_
    #    to pick the Top-20 transformed features.
    lgb0 = lgb.LGBMClassifier(
        n_estimators=100,
        random_state=42,
        verbose=-1
    )
    pipe0 = Pipeline(steps=[('prep', preprocessor),
                            ('model', lgb0)])
    pipe0.fit(X, y)

    # Recover transformed feature names (num columns first, then OHE columns —
    # matches the ColumnTransformer's output column order).
    ohe = pipe0.named_steps['prep'].named_transformers_['cat']
    cat_features = ohe.get_feature_names_out(cat_cols)
    all_features = np.concatenate([num_cols, cat_features])

    # Indices of the 20 most important features (ascending importance order).
    importances = pipe0.named_steps['model'].feature_importances_
    top20_idx = np.argsort(importances)[-20:]
    top20_features = all_features[top20_idx]
    print(f'特征列：{top20_features}')
    # 4. Keep only the Top-20 features and retrain.
    # NOTE(review): re-fitting the preprocessor here on the same X yields the
    # same transform as inside pipe0, so top20_idx stays valid.
    X_transformed = preprocessor.fit_transform(X)
    X_top20 = X_transformed[:, top20_idx]

    # 5. Cross-validated evaluation of the final model configuration.
    lgb_final = lgb.LGBMClassifier(
        n_estimators=400,
        learning_rate=0.05,
        max_depth=-1,
        num_leaves=31,
        subsample=0.8,
        colsample_bytree=0.8,
        random_state=42,
        verbose=-1
    )

    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)
    auc_scores = cross_val_score(lgb_final, X_top20, y,
                                 scoring='roc_auc', cv=cv)

    print('5-fold AUC scores:', auc_scores)
    print('Mean AUC:', auc_scores.mean())
    print('Std AUC:', auc_scores.std())

    # 6. Refit on all data and report AUC (to match the report).
    # NOTE(review): this AUC is computed on the training data itself and
    # will be optimistic; test_light() gives the held-out figure.
    lgb_final.fit(X_top20, y)
    y_pred = lgb_final.predict_proba(X_top20)[:, 1]
    print('Full-data-retrained AUC:', roc_auc_score(y, y_pred))
    joblib.dump(lgb_final,'./light.pkl')
def test_light():
    """Evaluate the saved LightGBM model (./light.pkl) on the test split.

    Replays the same preprocessing + Top-20 feature selection performed in
    model_LightGBM() on the training data, applies it to the test data, and
    prints the held-out ROC-AUC.
    """
    model_path = './light.pkl'
    lgb_final = joblib.load(model_path)
    # 1. Load the training and test data
    train_df = pd.read_csv('../../data/processed/train_final.csv')
    test_df = pd.read_csv('../../data/processed/test_final.csv')

    # 2. Basic preprocessing: split features/target, detect column types
    target = 'Attrition'
    y_train = train_df[target]
    X_train = train_df.drop(columns=[target])
    y_test = test_df[target]
    X_test = test_df.drop(columns=[target])

    num_cols = X_train.select_dtypes(include=['int64', 'float64']).columns
    cat_cols = X_train.select_dtypes(include=['object', 'category', 'bool']).columns

    preprocessor = ColumnTransformer(
        transformers=[
            ('num', StandardScaler(), num_cols),
            ('cat', OneHotEncoder(handle_unknown='ignore'), cat_cols)
        ])

    # 3. Feature selection – same baseline LightGBM importance ranking as
    #    in model_LightGBM(), so the selected columns match the saved model.
    lgb0 = lgb.LGBMClassifier(
        n_estimators=100,
        random_state=42,
        verbose=-1
    )
    pipe0 = Pipeline(steps=[('prep', preprocessor),
                            ('model', lgb0)])
    pipe0.fit(X_train, y_train)

    # Recover transformed feature names (num columns first, then OHE columns)
    ohe = pipe0.named_steps['prep'].named_transformers_['cat']
    cat_features = ohe.get_feature_names_out(cat_cols)
    all_features = np.concatenate([num_cols, cat_features])

    # Indices of the Top-20 features by importance (ascending order)
    importances = pipe0.named_steps['model'].feature_importances_
    top20_idx = np.argsort(importances)[-20:]
    top20_features = all_features[top20_idx]

    # 4. Re-transform train (fit) and test (transform only — no leakage)
    X_train_transformed = preprocessor.fit_transform(X_train)
    X_test_transformed = preprocessor.transform(X_test)

    # Select the Top-20 columns by position, exactly as model_LightGBM() did
    # when training the saved model. Positional slicing also works when the
    # OneHotEncoder produces a scipy sparse matrix, where the previous
    # pd.DataFrame(...) conversion would raise.
    X_train_top20 = X_train_transformed[:, top20_idx]
    X_test_top20 = X_test_transformed[:, top20_idx]

    # 5. Predict probabilities and report the held-out AUC
    y_pred = lgb_final.predict_proba(X_test_top20)[:, 1]
    test_auc = roc_auc_score(y_test, y_pred)

    print(f'测试集上的AUC分数: {test_auc}')
def XGB_model(feature_names=None):
    """Grid-search XGBoost classifiers on the processed training data.

    Parameters:
        feature_names (list, optional): feature column names to use.
            If None, all columns except the target 'Attrition' are used.

    Returns:
        dict: model name -> fitted GridSearchCV object.
    """
    # 1. Load the data
    data = pd.read_csv('../../data/processed/train_final.csv')

    # Honor the documented default: None means "every column but the target".
    # Previously data[None] raised instead of applying this fallback.
    if feature_names is None:
        feature_names = [col for col in data.columns if col != 'Attrition']

    X = data[feature_names]
    y = data['Attrition']
    # NOTE(review): X_test/y_test are currently unused — the grid search only
    # fits on the training split; keep them for a future held-out evaluation.
    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42, stratify=y)

    # Candidate models and their hyper-parameter grids, scored by AUC
    models = {
        'XGB': (XGBClassifier(random_state=42, eval_metric='logloss'), {
            'n_estimators': [100, 200,300],
            'learning_rate': [0.05, 0.1,0.15,0.2],
            'max_depth': [3,5,7,9],
            'subsample': [0.8,0.9, 1.0],
            'colsample_bytree': [0.8, 0.9,1.0],
            'gamma': [0, 0.1],
            'reg_alpha': [0, 0.05,0.1],
            'reg_lambda': [0, 0.05,0.1]
        }),
    }

    best_models = {}

    # Grid-search each model with 4-fold CV, AUC as the selection metric
    for name, (model, params) in models.items():
        grid_search = GridSearchCV(model, params, cv=4, scoring='roc_auc', n_jobs=-1)
        grid_search.fit(X_train, y_train)

        # Keep the fitted search object so callers can inspect/refit
        best_models[name] = grid_search

        print(f"{name} 最佳参数: {grid_search.best_params_}")
        print(f"{name} 最佳AUC得分: {grid_search.best_score_}")

    return best_models


if __name__ == '__main__':
    # Entry point: currently evaluates the saved LightGBM model on the test
    # split; uncomment the other calls to retrain instead.
    # model_LightGBM()
    test_light()
    # XGB_model()