# -*- coding: utf-8 -*-
# !pip install pandas scikit-learn matplotlib seaborn xgboost joblib

import logging
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import classification_report, confusion_matrix, roc_auc_score, roc_curve
from xgboost import XGBClassifier
from sklearn.model_selection import GridSearchCV
from typing import Tuple
import joblib

# Configure Matplotlib to render Chinese text: SimHei as the sans-serif font,
# and disable the Unicode minus sign (which SimHei cannot display).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Configure root logger: INFO level, timestamped single-line records.
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s %(message)s')

# --- Data loading and inspection ---
# Hard-coded dataset locations; change here if the data directory moves.
TRAIN_PATH = 'D:\\wenjian\\py\\pythonProject\\mmw1\\data\\train.csv'
TEST_PATH = 'D:\\wenjian\\py\\pythonProject\\mmw1\\data\\test2.csv'

try:
    df = pd.read_csv(TRAIN_PATH)
    test_data = pd.read_csv(TEST_PATH)
    logging.info('数据读取成功')
except Exception as e:
    # Lazy %-style args: the message is only formatted if the record is emitted.
    logging.error('数据读取失败: %s', e)
    raise

# Bug fix: DataFrame.info() prints its report itself and returns None, so
# print(df.info()) emitted a spurious trailing "None" line — call it directly.
df.info()
print(df.describe())

# --- Feature engineering, applied identically to train and test ---
TARGET = 'Attrition'
DROP_COLS = ['JobSatisfaction'] if 'JobSatisfaction' in df.columns else []

cols_to_remove = [TARGET] + DROP_COLS
X_train = df.drop(cols_to_remove, axis=1)
y_train = df[TARGET]
X_test = test_data.drop(cols_to_remove, axis=1)
y_test = test_data[TARGET]

# One-hot encode train and test together so both splits end up with the
# exact same dummy columns, then slice back apart by row count.
n_train = len(X_train)
combined = pd.concat([X_train, X_test], axis=0)
combined_encoded = pd.get_dummies(combined)
X_train_encoded = combined_encoded.iloc[:n_train, :].copy()
X_test_encoded = combined_encoded.iloc[n_train:, :].copy()

# Standardize features; the scaler is fit on the training split only to
# avoid leaking test-set statistics.
scaler = StandardScaler()
X_train_scaled = scaler.fit_transform(X_train_encoded)
X_test_scaled = scaler.transform(X_test_encoded)

# --- Model training and evaluation helper ---
def train_and_evaluate_model(model, X_train, y_train, X_test, y_test, model_name="模型") -> Tuple[float, np.ndarray]:
    """
    Fit *model* on the training data and report its test-set performance.

    Prints the AUC, a classification report and the confusion matrix, and
    logs the AUC.

    Parameters:
        model: scikit-learn-compatible classifier (must implement fit,
            predict and predict_proba).
        X_train: training feature matrix.
        y_train: training target labels.
        X_test: test feature matrix.
        y_test: test target labels.
        model_name: label used in logged/printed output.

    Returns:
        auc: ROC-AUC score on the test set.
        y_proba: predicted positive-class probability for each test sample.
    """
    model.fit(X_train, y_train)
    y_pred = model.predict(X_test)
    # Column 1 of predict_proba is the positive-class probability.
    y_proba = model.predict_proba(X_test)[:, 1]
    auc = roc_auc_score(y_test, y_proba)
    # Lazy %-formatting: the string is built only if the record is emitted.
    logging.info('%s AUC: %.4f', model_name, auc)
    print(f'{model_name} AUC: {auc:.4f}')
    print(classification_report(y_test, y_pred))
    cm = confusion_matrix(y_test, y_pred)
    print(f'{model_name} 混淆矩阵:\n{cm}')
    return auc, y_proba

# Logistic regression baseline (max_iter raised so the solver converges
# on the standardized feature matrix).
lr = LogisticRegression(random_state=22, max_iter=1000)
lr_auc, lr_proba = train_and_evaluate_model(lr, X_train_scaled, y_train, X_test_scaled, y_test, model_name="Logistic Regression")

# Random forest with default hyperparameters.
rf = RandomForestClassifier(random_state=22)
rf_auc, rf_proba = train_and_evaluate_model(rf, X_train_scaled, y_train, X_test_scaled, y_test, model_name="Random Forest")

# XGBoost with default hyperparameters.
# NOTE(review): use_label_encoder is deprecated and removed in xgboost >= 2.0;
# confirm the pinned xgboost version still accepts this keyword.
xgb = XGBClassifier(random_state=22, use_label_encoder=False, eval_metric='logloss')
xgb_auc, xgb_proba = train_and_evaluate_model(xgb, X_train_scaled, y_train, X_test_scaled, y_test, model_name="XGBoost")

# --- XGBoost hyperparameter tuning via exhaustive grid search ---
# Search space: tree count, tree depth and learning rate (18 combinations,
# each scored with 5-fold cross-validated ROC-AUC).
tuning_grid = {
    'n_estimators': [100, 200],
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1, 0.2],
}

base_estimator = XGBClassifier(use_label_encoder=False, eval_metric='logloss', random_state=22)

grid_search = GridSearchCV(
    estimator=base_estimator,
    param_grid=tuning_grid,
    scoring='roc_auc',
    cv=5,
    verbose=1,
    n_jobs=-1,
)

grid_search.fit(X_train_scaled, y_train)
best_xgb = grid_search.best_estimator_
best_auc, best_proba = train_and_evaluate_model(
    best_xgb, X_train_scaled, y_train, X_test_scaled, y_test, model_name="Tuned XGBoost"
)

# Persist the tuned model to disk for later reuse.
joblib.dump(best_xgb, 'D:\\wenjian\\py\\pythonProject\\mmw1\\lh\\model\\xgb_model.pkl')

# Feature importances of the tuned XGBoost model, plotted as a horizontal
# bar chart over the one-hot-encoded feature names (order as in the
# encoded training frame, not sorted by importance).
importances = best_xgb.feature_importances_
features = X_train_encoded.columns
plt.figure(figsize=(15, 10))
plt.barh(features, importances)
plt.title('最优 XGBoost 模型特征重要性')
plt.xlabel('Importance')
plt.ylabel('Features')
plt.savefig('D:\\wenjian\\py\\pythonProject\\mmw1\\lh\\log\\xgb_feature_importance.png')
plt.show()

# --- ROC curves for all four models on a single figure ---
model_probas = [
    ("Logistic Regression", lr_proba),
    ("Random Forest", rf_proba),
    ("XGBoost", xgb_proba),
    ("Tuned XGBoost", best_proba),
]

plt.figure(figsize=(8, 6))
for label, scores in model_probas:
    fpr, tpr, _ = roc_curve(y_test, scores)
    model_auc = roc_auc_score(y_test, scores)
    plt.plot(fpr, tpr, label=f'{label} (AUC={model_auc:.2f})')
# Diagonal reference line: the expected curve of a random classifier.
plt.plot([0, 1], [0, 1], 'k--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('人才流失分析')
plt.legend()
plt.savefig('D:\\wenjian\\py\\pythonProject\\mmw1\\lh\\log\\ROC曲线.png')
plt.show()

# --- Append this run's AUC scores to a cumulative CSV log ---
import datetime
import os

results = {
    "timestamp": [datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S")],
    "LogisticRegression_AUC": [lr_auc],
    "RandomForest_AUC": [rf_auc],
    "XGBoost_AUC": [xgb_auc],
    "Tuned_XGBoost_AUC": [best_auc]
}
results_df = pd.DataFrame(results)

RESULTS_PATH = 'D:\\wenjian\\py\\pythonProject\\mmw1\\lh\\log\\model_results.csv'
# Bug fix: mode='a' with header=True re-wrote the column header on every run,
# interleaving header rows into the accumulated log. Write the header only
# when the file does not exist yet.
results_df.to_csv(RESULTS_PATH, mode='a', header=not os.path.exists(RESULTS_PATH), index=False)

# Optional: simple soft-voting ensemble — average the four models'
# positive-class probabilities and score the blended prediction.
ensemble_proba = sum([lr_proba, rf_proba, xgb_proba, best_proba]) / 4
ensemble_auc = roc_auc_score(y_test, ensemble_proba)
print(f'Ensemble Model AUC: {ensemble_auc:.4f}')
