import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.metrics import accuracy_score, classification_report, confusion_matrix, roc_auc_score
from xgboost import XGBClassifier
import matplotlib.pyplot as plt
import seaborn as sns
import joblib
import time
from sklearn.metrics import roc_curve


# Global seed for reproducibility of any NumPy-based randomness.
# NOTE(review): the sklearn/xgboost objects below all take their own
# random_state=42, so this only covers incidental np.random usage.
np.random.seed(42)


# 1. 数据加载和预处理
def load_and_preprocess_data(file_path):
    """Load the HR attrition CSV and integer-encode its categorical columns.

    Parameters
    ----------
    file_path : str
        Path to the CSV file to load.

    Returns
    -------
    pandas.DataFrame
        The loaded frame with identifier/constant columns removed and each
        categorical column replaced by integer label codes.

    Notes
    -----
    Encoders are fitted fresh on every call, so the integer mapping is only
    guaranteed to agree between the train and test files when each column
    contains the same set of category values in both — TODO confirm for
    the datasets in use.
    """
    df = pd.read_csv(file_path)

    # Drop columns with no predictive signal: 'EmployeeNumber' is a unique
    # ID; 'Over18' and 'StandardHours' are presumably constant — verify
    # against the data source.
    df = df.drop(['EmployeeNumber', 'Over18', 'StandardHours'], axis=1)

    # Categorical features to integer-encode.
    cat_cols = ['BusinessTravel', 'Department', 'EducationField', 'Gender',
                'JobRole', 'MaritalStatus', 'OverTime']

    # Fit a fresh encoder per column.  The original reused one LabelEncoder
    # and refit it each iteration, silently clobbering its classes_ — the
    # per-column codes were identical, but the shared object was misleading.
    for col in cat_cols:
        df[col] = LabelEncoder().fit_transform(df[col])
    return df


# Load the training data, then separate features from the target column.
train_df = load_and_preprocess_data("../data/train.csv")
y = train_df['Attrition']
X = train_df.drop(columns=['Attrition'])

# 2. Hold out 20% as a validation split, stratified so both splits keep
# the same attrition class ratio.
X_train, X_val, y_train, y_val = train_test_split(
    X, y,
    test_size=0.2,
    random_state=42,
    stratify=y,
)

# 3. Hyperparameter tuning via grid search with cross-validation.
print("Starting hyperparameter tuning with GridSearchCV...")
start_time = time.time()

# Parameter grid: 3 values for each of 8 hyperparameters -> 3**8 = 6561
# candidates, each fitted 5 times by the CV below. Expect a long runtime.
param_grid = {
    'n_estimators': [100, 200, 300],
    'max_depth': [3, 5, 7],
    'learning_rate': [0.01, 0.1, 0.2],
    'subsample': [0.7, 0.8, 0.9],
    'colsample_bytree': [0.7, 0.8, 0.9],
    'gamma': [0, 0.1, 0.2],
    'reg_alpha': [0, 0.1, 1],
    'reg_lambda': [1, 5, 10]
}

# Base XGBoost classifier.
# FIX: removed the deprecated `use_label_encoder=False` argument — it has
# been a no-op since xgboost 1.6 and was removed in 2.x, where passing it
# only produces "Parameters: { use_label_encoder } are not used" warnings.
# Also renamed the variable so it no longer shadows the conventional
# `import xgboost as xgb` alias.
xgb_clf = XGBClassifier(random_state=42, eval_metric='logloss')

# Stratified folds keep the class ratio of the (imbalanced) target stable
# across all CV splits.
strat_kfold = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

# Grid-search object, scored by ROC-AUC.
grid_search = GridSearchCV(
    estimator=xgb_clf,
    param_grid=param_grid,
    scoring='roc_auc',
    cv=strat_kfold,
    n_jobs=-1,  # use all available CPU cores
    verbose=2)

# Run the exhaustive search on the training split only.
grid_search.fit(X_train, y_train)

# Report the winning configuration and timing.
print("\n已找到的最佳参数:")
print(grid_search.best_params_)
print(f"最佳的ROC-AUC分数: {grid_search.best_score_:.4f}")
print(f"超参数搜索耗时:{time.time() - start_time:.2f}秒")

# 4. Final model from the best parameters.
print("\n正在使用最佳参数训练最终模型")
# GridSearchCV refits the best estimator on the full training split by
# default (refit=True), so best_estimator_ is already trained.
best_model = grid_search.best_estimator_

# Evaluate on the held-out validation split (NOT the test file).
val_preds = best_model.predict(X_val)
val_probs = best_model.predict_proba(X_val)[:, 1]

# FIX: the original printed "测试集性能" (test-set performance) here even
# though y_val is the validation split — label it correctly.
print("\n验证集性能:")
print(f"准确性: {accuracy_score(y_val, val_preds):.4f}")
print(f"ROC-AUC: {roc_auc_score(y_val, val_probs):.4f}")
print("\n分类报告:")
print(classification_report(y_val, val_preds))

# ROC curve for the validation split.
fpr, tpr, _ = roc_curve(y_val, val_probs)
roc_auc = roc_auc_score(y_val, val_probs)

plt.figure(figsize=(10, 8))
plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
plt.xlim([0.0, 1.0])
plt.ylim([0.0, 1.05])
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
# Split-specific title so the figure is distinguishable from the test-set
# ROC produced later in the script.
plt.title('Receiver Operating Characteristic - Validation Set')
plt.legend(loc="lower right")
plt.savefig('roc_curve_val.png', bbox_inches='tight')
plt.close()

# Confusion matrix for the validation split.
cm = confusion_matrix(y_val, val_preds)
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues',
            xticklabels=['No Attrition', 'Attrition'],
            yticklabels=['No Attrition', 'Attrition'])
plt.xlabel('Predicted')
plt.ylabel('Actual')
plt.title('Confusion Matrix - Validation Set')
plt.savefig('confusion_matrix_val.png')
plt.close()

# 5. Predict on the unseen test file.
print("\nLoading and preprocessing test data...")
test_df = load_and_preprocess_data("../data/test2.csv")

# FIX: check for ground-truth labels BEFORE dropping the column.  The
# original dropped 'Attrition' unconditionally (crashing on an unlabeled
# file) and then guarded on `'Attrition' in test_df.columns`, which was
# always True because df.drop returns a copy.
has_labels = 'Attrition' in test_df.columns
if has_labels:
    X_test = test_df.drop('Attrition', axis=1)
    y_test = test_df['Attrition']
else:
    X_test = test_df

# Predict classes and positive-class probabilities for the test file.
test_preds = best_model.predict(X_test)
test_probs = best_model.predict_proba(X_test)[:, 1]

# Evaluate only when ground truth is available.
if has_labels:
    # FIX: the original printed "验证集性能" (validation performance) here
    # even though this is the test file — label swapped with section 4.
    print("\n测试集性能:")
    print(f"准确率: {accuracy_score(y_test, test_preds):.4f}")
    print(f"ROC-AUC: {roc_auc_score(y_test, test_probs):.4f}")
    print("\n分类报告:")
    print(classification_report(y_test, test_preds))

    # ROC curve for the test set.
    fpr, tpr, _ = roc_curve(y_test, test_probs)
    roc_auc = roc_auc_score(y_test, test_probs)

    plt.figure(figsize=(10, 8))
    plt.plot(fpr, tpr, color='darkorange', lw=2, label=f'ROC curve (area = {roc_auc:.2f})')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver Operating Characteristic - Test Set')
    plt.legend(loc="lower right")
    # FIX: the original wrote this figure to 'roc_curve_val.png', silently
    # overwriting the validation ROC plot saved earlier.
    plt.savefig('roc_curve_test.png', bbox_inches='tight')
    plt.close()

    # Confusion matrix for the test set.
    cm_test = confusion_matrix(y_test, test_preds)
    plt.figure(figsize=(8, 6))
    sns.heatmap(cm_test, annot=True, fmt='d', cmap='Blues',
                xticklabels=['No Attrition', 'Attrition'],
                yticklabels=['No Attrition', 'Attrition'])
    plt.xlabel('Predicted')
    plt.ylabel('Actual')
    plt.title('Confusion Matrix - Test Set')
    plt.savefig('confusion_matrix_test.png')
    plt.close()

# 6. Persist the predictions and the trained model.
# Attach both prediction columns to the original test frame in one step.
test_df = test_df.assign(
    Attrition_Predicted=test_preds,
    Attrition_Probability=test_probs,
)

# Write the annotated test data out as CSV (no row index).
test_df.to_csv('predictions_with_probabilities.csv', index=False)
print("\nPredictions saved to predictions_with_probabilities.csv")

# Serialize the fitted estimator for later reuse.
joblib.dump(best_model, '../model/best_xgboost_model.pkl')
print("Model saved to best_xgboost_model.pkl")

# 7. Feature-importance analysis: bar chart of the 20 strongest features.
importances = best_model.feature_importances_
# Indices of the top-20 features, most important first.
top_order = np.argsort(importances)[::-1][:20]

plt.figure(figsize=(12, 10))
sns.barplot(x=importances[top_order], y=X.columns[top_order])
plt.title('Top 20 Feature Importance')
plt.tight_layout()
plt.savefig('feature_importance.png')
plt.close()

print("\nProcess completed successfully!")