import numpy as np
import pandas as pd
import seaborn as sns
from imblearn.combine import SMOTEENN
from matplotlib import pyplot as plt
from sklearn.model_selection import train_test_split, GridSearchCV, RandomizedSearchCV, StratifiedKFold
from sklearn.metrics import accuracy_score, roc_auc_score, recall_score, f1_score, confusion_matrix, \
    precision_recall_curve
import xgboost as xgb
import joblib
from imblearn.over_sampling import SMOTE
from xgboost import XGBClassifier

# 1. Load the raw data and build the feature matrix / target vector.
data = pd.read_csv('../data/train.csv')  # replace with the actual data file path
feature_data = data.copy()

# Columns used as model inputs (mix of categorical and numeric).
FEATURE_COLUMNS = [
    'MaritalStatus', 'Department',
    'JobRole', 'OverTime', 'JobSatisfaction',
    'MonthlyIncome', 'YearsSinceLastPromotion', 'BusinessTravel',
    'WorkLifeBalance', 'Age', 'YearsAtCompany',
    'DistanceFromHome', 'EnvironmentSatisfaction', 'NumCompaniesWorked',
]

# One-hot encode categorical columns; numeric columns pass through unchanged.
x = pd.get_dummies(feature_data[FEATURE_COLUMNS])

# Target: the 'Attrition' column of the training data.
y = feature_data['Attrition']


# 2. Hold out 30% of the rows as the test set (fixed seed for reproducibility).
x_train, x_test, y_train, y_test = train_test_split(
    x, y, test_size=0.3, random_state=25
)

# SMOTE oversampling — applied to the TRAINING set only, never the test set.
# NOTE(review): resampling before a cross-validated search leaks synthetic
# samples across CV folds; an imblearn Pipeline(SMOTE -> classifier) inside
# the search would be methodologically cleaner — confirm before changing.
smote = SMOTE(random_state=25, sampling_strategy='auto')  # 'auto': resample all non-majority classes
x_train, y_train = smote.fit_resample(x_train, y_train)

# 3. Hyper-parameter search over a small candidate grid.
param_grid = {
    'max_depth': [3],
    'learning_rate': [0.2],
    'n_estimators': [600],
    'min_child_weight': [9],
    'scale_pos_weight': [1, 3, 5],  # reweight the positive class to ease imbalance
    'reg_alpha': [1],               # L1 regularization
    'reg_lambda': [0, 0.1],         # L2 regularization
    'subsample': [1.0],             # row sampling rate
    'colsample_bytree': [1.0]       # feature sampling rate
}

# The grid above has only 3 * 2 = 6 combinations, so RandomizedSearchCV with
# n_iter=50 degenerates to exhaustive search and sklearn emits a UserWarning.
# GridSearchCV evaluates the same 6 candidates explicitly and warning-free.
grid_search = GridSearchCV(
    estimator=xgb.XGBClassifier(random_state=25, n_jobs=-1, objective='binary:logistic'),
    param_grid=param_grid,
    scoring='f1',                    # optimize F1 (recall/precision trade-off on the minority class)
    cv=StratifiedKFold(n_splits=5),  # stratified folds preserve the class ratio per fold
    n_jobs=-1
)

grid_search.fit(x_train, y_train)
# best_estimator_ is refit on the full (resampled) training set with the best params.
best_xgb = grid_search.best_estimator_
print(grid_search.best_params_)

# 4. Threshold-based evaluation on the held-out test set.
# Predicted probability of the positive (attrition) class.
y_proba_val = best_xgb.predict_proba(x_test)[:, 1]

# Fixed decision threshold, stated as tuned on a validation set.
# NOTE(review): if 0.71 was actually tuned on x_test, the metrics below are
# optimistically biased — confirm it came from a separate validation split
# (e.g. precision_recall_curve + argmax of F1).
threshold = 0.71

# Apply the threshold to turn probabilities into hard 0/1 predictions.
y_pred = (y_proba_val > threshold).astype(int)

# Core metrics: accuracy, AUC-ROC (threshold-free), recall, F1.
print(f"准确率: {accuracy_score(y_test, y_pred):.4f}")
print(f"AUC-ROC: {roc_auc_score(y_test, y_proba_val):.4f}")
print(f"召回率: {recall_score(y_test, y_pred):.4f}")
print(f"F1分数: {f1_score(y_test, y_pred):.4f}")

# Confusion matrix (rows = true class, columns = predicted class).
print("混淆矩阵:")
print(confusion_matrix(y_test, y_pred))

# Persist the fitted model for later inference.
joblib.dump(best_xgb, "../model/xgb_20250604.pkl")
