import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier, plot_importance
from sklearn.metrics import classification_report, accuracy_score, roc_auc_score, roc_curve
from imblearn.over_sampling import SMOTE
import joblib

# ========== Load data ==========
data = pd.read_csv('../data/train.csv')

# ========== Feature selection ==========
# Drop columns deemed uninformative/redundant for this model, plus the target.
drop_cols = [
    'YearsInCurrentRole',
    'YearsWithCurrManager',
    'PerformanceRating',
    'Over18'
]
X = data.drop(columns=drop_cols + ['Attrition'])
y = data['Attrition']

# ========== Missing-value imputation ==========
# Numeric columns: fill with the column mean.
# NOTE: assign the result back instead of `X[col].fillna(..., inplace=True)` —
# in-place fillna on a column selection is deprecated (pandas >= 2.1) and
# stops modifying the frame under Copy-on-Write (pandas 3.0).
num_cols = X.select_dtypes(include=['int64', 'float64']).columns
for col in num_cols:
    X[col] = X[col].fillna(X[col].mean())

# Categorical (object-dtype) columns: fill with the column mode.
cat_cols = X.select_dtypes(include=['object']).columns
for col in cat_cols:
    X[col] = X[col].fillna(X[col].mode()[0])

# ========== Categorical encoding ==========
# Encode each object-dtype column as integer codes.
# Use a fresh LabelEncoder per column: a single shared encoder would be
# refit on every iteration and would only retain the *last* column's
# mapping, making any later inverse_transform silently wrong.
label_encoders = {}
for col in cat_cols:
    encoder = LabelEncoder()
    X[col] = encoder.fit_transform(X[col])
    label_encoders[col] = encoder

print(X.info())

# ========== 7. Train/test split ==========
# stratify=y keeps the class ratio identical in train and test — important
# for an imbalanced target like Attrition, otherwise the test-set ratio
# drifts with the random seed.
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.2, random_state=42, stratify=y
)

# ========== Standardization ==========
# Fit the scaler on the training set only, then apply the same transform
# to the test set, so no test-set statistics leak into training.
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)

# ========== 6. Class-imbalance handling (SMOTE) ==========
# Oversample the minority class on the TRAINING set only; the test set
# must keep the real class distribution for honest evaluation.
smote = SMOTE(random_state=42, k_neighbors=4)
X_train, y_train = smote.fit_resample(X_train, y_train)
# #
# #

# ========== 8. Model training: RandomForest ==========
rf_model = RandomForestClassifier(
    n_estimators=200, max_depth=20, random_state=42, class_weight='balanced'
)
rf_model.fit(X_train, y_train)

# Predict & evaluate.
# sklearn's metric convention is (y_true, y_pred); the original calls passed
# them swapped, which flips per-class precision and recall in the report
# (accuracy itself is symmetric, fixed here for consistency).
y_pred = rf_model.predict(X_test)
print("Random Forest Accuracy:", accuracy_score(y_test, y_pred))
print(classification_report(y_test, y_pred))
#
# ========== 9. Model training: XGBoost ==========
# scale_pos_weight = (#negative / #positive) counteracts class imbalance.
# NOTE(review): y_train was already rebalanced by SMOTE above, so this
# ratio is ~1 here — confirm whether the weight is still intended.
neg, pos = (y_train == 0).sum(), (y_train == 1).sum()
xgb_model = XGBClassifier(
    random_state=42,
    scale_pos_weight=neg / pos,
    n_estimators=200,
    max_depth=6,
    learning_rate=0.1
)
xgb_model.fit(X_train, y_train)

# Predict & evaluate — (y_true, y_pred) order, fixing the swapped
# arguments in the original classification_report call (the swap flips
# per-class precision and recall).
y_pred_xgb = xgb_model.predict(X_test)
print("XGBoost Accuracy:", accuracy_score(y_test, y_pred_xgb))
print(classification_report(y_test, y_pred_xgb))

# ========== 10. AUC & ROC ==========
roc_auc_rf = roc_auc_score(y_test, rf_model.predict_proba(X_test)[:, 1])
roc_auc_xgb = roc_auc_score(y_test, xgb_model.predict_proba(X_test)[:, 1])
print(f"Random Forest AUC: {roc_auc_rf}")
print(f"XGBoost AUC: {roc_auc_xgb}")

# # 随机森林网格搜索
# param_grid_rf = {
#     'n_estimators': [100, 200, 300],
#     'max_depth': [10, 20, None],
#     'min_samples_split': [2, 5, 10],
#     'class_weight': ['balanced', None]
# }
# grid_search_rf = GridSearchCV(RandomForestClassifier(random_state=42), param_grid_rf, cv=5, n_jobs=-1)
# grid_search_rf.fit(X_train, y_train)
# best_rf_model = grid_search_rf.best_estimator_
# print(best_rf_model)
# # XGBoost网格搜索
# param_grid_xgb = {
#     'n_estimators': [100, 200],
#     'max_depth': [6, 10],
#     'learning_rate': [0.05, 0.1, 0.2],
#     'scale_pos_weight': [neg / pos]
# }
# grid_search_xgb = GridSearchCV(XGBClassifier(random_state=42), param_grid_xgb, cv=5, n_jobs=-1)
# grid_search_xgb.fit(X_train, y_train)
# best_xgb_model = grid_search_xgb.best_estimator_
# print(best_xgb_model)
#
# fpr_rf, tpr_rf, _ = roc_curve(y_test, rf_model.predict_proba(X_test)[:, 1])
# fpr_xgb, tpr_xgb, _ = roc_curve(y_test, xgb_model.predict_proba(X_test)[:, 1])
#
# plt.figure()
# plt.plot(fpr_rf, tpr_rf, color='blue', label=f'Random Forest (AUC={roc_auc_rf:.2f})')
# plt.plot(fpr_xgb, tpr_xgb, color='green', label=f'XGBoost (AUC={roc_auc_xgb:.2f})')
# plt.plot([0, 1], [0, 1], color='navy', linestyle='--')
# plt.xlabel('False Positive Rate')
# plt.ylabel('True Positive Rate')
# plt.title('ROC Curve')
# plt.legend(loc='best')
# plt.show()
#
# # ========== 11. 特征重要性可视化 ==========
# # RandomForest 特征重要性
# rf_importances = pd.Series(rf_model.feature_importances_, index=X.columns)
# rf_importances = rf_importances.sort_values(ascending=False).head(15)
#
# plt.figure(figsize=(10, 6))
# sns.barplot(x=rf_importances.values, y=rf_importances.index, palette="viridis")
# plt.title("Random Forest - Top 15 Feature Importances")
# plt.xlabel("Importance")
# plt.ylabel("Feature")
# plt.show()
#
# # XGBoost 特征重要性
# plt.figure(figsize=(10, 6))
# plot_importance(xgb_model, max_num_features=15, importance_type='weight')
# plt.title("XGBoost - Top 15 Feature Importances")
# plt.show()
#
# # ========== 12. 保存模型 ==========
# joblib.dump(rf_model, '../model/rf_model.pkl')
# joblib.dump(xgb_model, '../model/xgb_model.pkl')
