from pathlib import Path

import joblib
import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from imblearn.over_sampling import SMOTE
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score, roc_curve
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import StandardScaler, LabelEncoder
from xgboost import XGBClassifier, plot_importance

# Load the raw training data.
data = pd.read_csv('../data/train.csv')

# Columns dropped before modeling, plus the target itself, to form the
# feature matrix.
drop_cols = [
    'YearsInCurrentRole',
    'YearsWithCurrManager',
    'PerformanceRating',
    'Over18'
]
X = data.drop(columns=drop_cols + ['Attrition'])
y = data['Attrition']

# Encode the target as 0/1 when it is categorical (e.g. 'No'/'Yes' in the
# IBM attrition dataset — TODO confirm against the actual CSV). Modern
# XGBClassifier requires numeric class labels, and the scale_pos_weight
# computation below counts (y_train == 1), which is always 0 for string
# labels and would divide by zero.
if y.dtype == object:
    y = pd.Series(LabelEncoder().fit_transform(y), index=y.index, name=y.name)

# Impute missing numeric values with the column mean.
# Assign the result back instead of calling fillna(..., inplace=True) on a
# column selection: that chained-assignment form raises FutureWarning and
# silently stops working under pandas Copy-on-Write (pandas 3.0).
num_cols = X.select_dtypes(include=['int64', 'float64']).columns
for col in num_cols:
    X[col] = X[col].fillna(X[col].mean())

# Impute missing categorical values with the column mode.
cat_cols = X.select_dtypes(include=['object']).columns
for col in cat_cols:
    X[col] = X[col].fillna(X[col].mode()[0])

# Integer-encode categorical features. The encoder is refit per column,
# so its final state is not meaningful after the loop.
label_encoder = LabelEncoder()
for col in cat_cols:
    X[col] = label_encoder.fit_transform(X[col])

# Standardize all features to zero mean / unit variance.
# NOTE(review): the scaler is fit on the FULL dataset before the
# train/test split, which leaks test-set statistics into training —
# consider fitting on the train split only.
scaler = StandardScaler()
X_scaled = scaler.fit_transform(X)

# Split into train/test BEFORE oversampling: applying SMOTE to the full
# dataset would place synthetic neighbors of test-set points into the
# training data (leakage) and inflate every evaluation metric below.
# Stratify so both splits keep the original class ratio.
X_train, X_test, y_train, y_test = train_test_split(
    X_scaled, y, test_size=0.2, random_state=42, stratify=y
)

# Oversample the minority class on the training split only.
smote = SMOTE(random_state=42, k_neighbors=4)
X_train, y_train = smote.fit_resample(X_train, y_train)

# --- Model training ---------------------------------------------------
# Both models are weighted toward the positive (attrition) class so the
# classifiers do not ignore leavers.

# Negative/positive counts in the training labels; their ratio is the
# positive-class weight handed to XGBoost.
neg = (y_train == 0).sum()
pos = (y_train == 1).sum()

rf_model = RandomForestClassifier(
    n_estimators=200,
    max_depth=20,
    random_state=42,
    class_weight='balanced',
)
xgb_model = XGBClassifier(
    random_state=42,
    scale_pos_weight=neg / pos,
    n_estimators=200,
    max_depth=6,
    learning_rate=0.1,
)

# Fit the random forest first, then XGBoost (same order as before).
for clf in (rf_model, xgb_model):
    clf.fit(X_train, y_train)

# --- Evaluation -------------------------------------------------------
# Positive-class probabilities, computed once per model and reused for
# AUC scoring and threshold tuning below.
proba_rf = rf_model.predict_proba(X_test)[:, 1]
proba_xgb = xgb_model.predict_proba(X_test)[:, 1]

# Hard predictions at the default 0.5 threshold.
y_pred_rf = rf_model.predict(X_test)
y_pred_xgb = xgb_model.predict(X_test)

# Area under the ROC curve.
roc_auc_rf = roc_auc_score(y_test, proba_rf)
roc_auc_xgb = roc_auc_score(y_test, proba_xgb)

print(f"Random Forest AUC: {roc_auc_rf}")
print(f"XGBoost AUC: {roc_auc_xgb}")

# Accuracy plus per-class precision / recall / F1 at the default threshold.
print("Random Forest Accuracy:", accuracy_score(y_test, y_pred_rf))
print(classification_report(y_test, y_pred_rf))

print("XGBoost Accuracy:", accuracy_score(y_test, y_pred_xgb))
print(classification_report(y_test, y_pred_xgb))

# Lower the decision threshold to trade precision for recall on leavers.
threshold = 0.4
y_pred_rf = (proba_rf >= threshold).astype(int)
y_pred_xgb = (proba_xgb >= threshold).astype(int)

print("Random Forest with adjusted threshold accuracy:", accuracy_score(y_test, y_pred_rf))
print(classification_report(y_test, y_pred_rf))

print("XGBoost with adjusted threshold accuracy:", accuracy_score(y_test, y_pred_xgb))
print(classification_report(y_test, y_pred_xgb))

# --- ROC curves -------------------------------------------------------
# One (color, legend label, model) entry per classifier.
curve_specs = [
    ('blue', f'Random Forest (AUC={roc_auc_rf:.2f})', rf_model),
    ('green', f'XGBoost (AUC={roc_auc_xgb:.2f})', xgb_model),
]

plt.figure()
for color, label, model in curve_specs:
    fpr, tpr, _ = roc_curve(y_test, model.predict_proba(X_test)[:, 1])
    plt.plot(fpr, tpr, color=color, label=label)
# Diagonal chance line for reference.
plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
plt.xlabel('False Positive Rate')
plt.ylabel('True Positive Rate')
plt.title('ROC Curve')
plt.legend(loc='best')
plt.show()

# --- Feature importance ----------------------------------------------
# Top 15 random-forest importances, mapped back to the original column
# names (X's column order matches the scaled matrix the model saw).
rf_importances = (
    pd.Series(rf_model.feature_importances_, index=X.columns)
    .sort_values(ascending=False)
    .head(15)
)

plt.figure(figsize=(10, 6))
# seaborn >= 0.13 deprecates `palette` without `hue`; pass the y values
# as `hue` with the legend disabled to keep the same per-bar coloring.
sns.barplot(
    x=rf_importances.values,
    y=rf_importances.index,
    hue=rf_importances.index,
    palette="viridis",
    legend=False,
)
plt.title("Random Forest - Top 15 Feature Importances")
plt.xlabel("Importance")
plt.ylabel("Feature")
plt.show()

# --- Persist trained models ------------------------------------------
# Create the output directory first: joblib.dump raises FileNotFoundError
# when the parent directory does not exist.
model_dir = Path('../model')
model_dir.mkdir(parents=True, exist_ok=True)
joblib.dump(rf_model, model_dir / 'rf_model.pkl')
joblib.dump(xgb_model, model_dir / 'xgb_model.pkl')
