
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, classification_report, roc_auc_score
from imblearn.over_sampling import SMOTE
from imblearn.combine import SMOTEENN  # 结合过采样和欠采样
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline as ImbPipeline
import joblib
from xgboost import XGBClassifier

# ----------------------
# 1. Data loading and preprocessing
# ----------------------
df = pd.read_csv('../data/train3.csv')

# Split off the target; drop the employee ID so it never leaks in as a feature.
y = df['Attrition']
X = df.drop(['Attrition', 'EmployeeNumber'], axis=1)

# Record which columns are categorical so the test-time pipeline can
# apply the identical encoding later.
categorical_cols = X.select_dtypes(include=['object']).columns.tolist()

# One-hot encode the categorical columns.
X_encoded = pd.get_dummies(X, columns=categorical_cols)
feature_names = X_encoded.columns.tolist()

# Persist feature metadata (consumed by the testing section below).
joblib.dump(feature_names, "../model/feature_names.pkl")
joblib.dump(categorical_cols, "../model/categorical_cols.pkl")

# ----------------------
# 2. Improved sample-balancing strategy
# ----------------------
# FIX (data leakage): resampling must happen AFTER the train/test split.
# The previous code ran SMOTEENN on the full dataset and then split, so
# synthetic samples interpolated from (future) test rows ended up in the
# training set, and the test set itself contained synthetic points —
# inflating every downstream metric.
#
# Split first, stratifying on the ORIGINAL label distribution.
X_train, X_test, y_train, y_test = train_test_split(
    X_encoded, y, test_size=0.2, random_state=17, stratify=y
)

# SMOTE over-samples the minority class up to minority:majority = 1:2,
# then ENN cleans/under-samples noisy majority-class points.
smote_enn = SMOTEENN(random_state=42,
                     smote=SMOTE(sampling_strategy=0.5, random_state=42))

# Resample the TRAINING fold only; the test fold stays untouched real data.
X_resampled, y_resampled = smote_enn.fit_resample(X_train, y_train)
X_train, y_train = X_resampled, y_resampled

# ----------------------
# 3. Model tuning (focus on minority-class recognition)
# ----------------------
# Hyperparameter grid. Every candidate is currently disabled, so the
# search fits a single RandomForest with default settings; uncomment
# entries below to enable a real grid search.
param_grid = {
    # 'classifier__n_estimators': [100, 200, 300],
    # 'classifier__max_depth': [8, 10, 12],
    # 'classifier__min_samples_split': [2, 5],
    # 'classifier__min_samples_leaf': [1, 2],  # limits overfitting
    # 'classifier__class_weight': ['balanced', 'balanced_subsample'],  # automatic class weighting
}

# Imbalanced-learn pipeline wrapping the classifier.
pipeline = ImbPipeline([
    ('classifier', RandomForestClassifier(random_state=42)),
])

# Grid search scored by F1 — far more meaningful than accuracy on
# imbalanced data.
grid_search = GridSearchCV(
    estimator=pipeline,
    param_grid=param_grid,
    cv=5,
    scoring='f1',
    n_jobs=-1,
    verbose=1,
)

grid_search.fit(X_train, y_train)
best_rf = grid_search.best_estimator_

# ----------------------
# 4. Evaluate the tuned model
# ----------------------
y_pred = best_rf.predict(X_test)
y_pred_proba = best_rf.predict_proba(X_test)[:, 1]  # P(class == 1), for AUC

print("优化后模型评估:")
print(classification_report(y_test, y_pred))
print(f"AUC值: {roc_auc_score(y_test, y_pred_proba):.4f}")

# Recall on the positive (attrition) class is the headline metric here.
print(f"\n流失样本召回率: {recall_score(y_test, y_pred):.4f}")

# Persist the tuned model for the testing section.
joblib.dump(best_rf, "../model/人才流失数据分析_优化版")

# ----------------------

# ----------------------
# Model testing section
# ----------------------
# Reload the persisted model together with the feature metadata that was
# saved during training, so encoding stays consistent.
model = joblib.load("../model/人才流失数据分析_优化版")
feature_names = joblib.load("../model/feature_names.pkl")
categorical_cols = joblib.load("../model/categorical_cols.pkl")

# Load the held-out test file.
df_test = pd.read_csv('../data/test2.csv')

# Prepare the test data with exactly the same transforms as training.
# 1. Drop the label and the employee-ID column.
x_test_raw = df_test.drop(['Attrition', 'EmployeeNumber'], axis=1)  # features only
y_test = df_test['Attrition']  # ground-truth labels

# 2. One-hot encode the categorical features (same columns as training).
x_test_encoded = pd.get_dummies(x_test_raw, columns=categorical_cols)

# 3+4. Align test features with the training feature set in ONE step:
# reindex adds training-only columns filled with 0, drops columns unseen
# at training time, and orders everything like the training matrix.
# (Replaces the previous per-column insertion loop, which fragments the
# DataFrame and performs O(n_features) pandas inserts.)
x_test_encoded = x_test_encoded.reindex(columns=feature_names, fill_value=0)

# 5. Optionally evaluate on just the first 30 rows (currently unused).
x_test_sample = x_test_encoded.iloc[0:30]
y_test_sample = y_test.iloc[0:30]

# Predict on the full aligned test set.
y_pred = model.predict(x_test_encoded)
# Keep only P(class == 1) — this is what roc_auc_score expects as y_score.
y_pred_proba = model.predict_proba(x_test_encoded)[:, 1]

# Core metrics on the held-out test set.
accuracy = accuracy_score(y_test, y_pred)
precision = precision_score(y_test, y_pred)
recall = recall_score(y_test, y_pred)
f1 = f1_score(y_test, y_pred)

print("\n测试集评估结果:")
print(f"准确率(Accuracy): {accuracy:.4f}")
print(f"精确率(Precision): {precision:.4f}")
print(f"召回率(Recall): {recall:.4f}")
print(f"F1分数: {f1:.4f}")
# FIX: AUC must be computed from predicted probabilities, not hard 0/1
# labels; the old call passed y_pred, collapsing the ROC curve to a
# single threshold and reporting a misleading AUC.
print(f"AUC值: {roc_auc_score(y_test, y_pred_proba):.4f}")

# Detailed per-class report.
print("\n分类报告:")
print(classification_report(y_test, y_pred))