import os
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.utils import class_weight

from utils.log import Logger
from utils.common import data_preprocess
from xgboost import XGBClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score
import matplotlib.pyplot as plt
from sklearn.tree import plot_tree
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV

import joblib
# Use the SimHei font so Chinese text in figure labels/titles renders correctly,
# and bump the default font size for readability.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15

def modelpredict():
    """Train an XGBoost attrition classifier with grid search and evaluate it.

    Reads '../data/train.csv' for training and '../data/test2.csv' for the
    held-out evaluation. Prints the best hyper-parameters, the best
    cross-validated AUC, and the test-set AUC. Returns None.
    """
    # --- Data loading and feature engineering ---
    data = pd.read_csv('../data/train.csv')
    # Numeric features. Fix: 'YearsAtCompany' was listed twice in the original,
    # which duplicated the column and effectively double-weighted that feature.
    num_features = ['Age', 'DistanceFromHome', 'EnvironmentSatisfaction',
                    'JobSatisfaction', 'MonthlyIncome', 'TrainingTimesLastYear',
                    'RelationshipSatisfaction', 'YearsAtCompany']
    cat_features = ['BusinessTravel', 'Department', 'Gender', 'MaritalStatus',
                    'JobRole', 'OverTime']
    # One-hot encode categorical features (drop_first avoids collinear dummies).
    data_cat = pd.get_dummies(data[cat_features], drop_first=True)
    train_data = pd.concat([data[num_features], data_cat], axis=1)
    target = data['Attrition']

    # --- Standardization (the fitted scaler is reused on the test set below) ---
    transform = StandardScaler()
    train_data_scaled = transform.fit_transform(train_data)

    # --- Train / validation split (validation set drives early stopping) ---
    X_train, X_val, y_train, y_val = train_test_split(
        train_data_scaled, target, test_size=0.2, random_state=23, stratify=target
    )

    # --- Class imbalance: balanced per-sample weights ---
    classes_weights = class_weight.compute_sample_weight('balanced', y_train)

    # --- Model and grid search ---
    xgb_model = XGBClassifier(
        objective='binary:logistic',
        eval_metric='auc',
        early_stopping_rounds=10,
        random_state=23
    )

    # NOTE: value order is preserved on purpose — GridSearchCV breaks score ties
    # by taking the first best candidate, so reordering could change best_params_.
    param_grid = {
        'learning_rate': [0.05, 0.1, 0.2, 0.3, 0.4, 0.5],
        'n_estimators': [100, 150, 250, 230, 300, 200],
        'max_depth': [3, 5, 6, 4, 7, 8],
        'gamma': [0, 0.1],
        'reg_alpha': [0, 0.1],
        'reg_lambda': [0.5, 1]
    }

    grid_search = GridSearchCV(
        estimator=xgb_model,
        param_grid=param_grid,
        scoring='roc_auc',
        cv=3,
        n_jobs=-1
    )

    # Fit with sample weights; eval_set feeds the early-stopping callback.
    grid_search.fit(X_train, y_train,
                    sample_weight=classes_weights,
                    eval_set=[(X_val, y_val)],
                    verbose=False)

    print('最佳参数：', grid_search.best_params_)
    print('最佳交叉验证AUC：', grid_search.best_score_)

    # --- Test-set evaluation ---
    test_data = pd.read_csv('../data/test2.csv')
    test_cat = pd.get_dummies(test_data[cat_features], drop_first=True)
    x_test = pd.concat([test_data[num_features], test_cat], axis=1)
    # Fix: align test columns to the training design matrix. If the test CSV is
    # missing a category level (or has an extra one), get_dummies produces a
    # different column set/order than the scaler was fitted on; reindex fills
    # missing dummy columns with 0 and drops unseen ones.
    x_test = x_test.reindex(columns=train_data.columns, fill_value=0)
    x_test_scaled = transform.transform(x_test)
    y_test = test_data['Attrition']

    # Predict positive-class probabilities and report AUC.
    y_pred_proba = grid_search.predict_proba(x_test_scaled)[:, 1]
    print('测试集AUC值:', roc_auc_score(y_test, y_pred_proba))


# Script entry point: run the full training/evaluation pipeline when
# executed directly (no effect when imported as a module).
if __name__ == '__main__':
    modelpredict()

