import pandas as pd
from xgboost import XGBClassifier
from sklearn.metrics import accuracy_score, classification_report, roc_auc_score


# Feature-engineering function (original logic kept, lightly hardened)
def tezhen(data):
    """Feature-engineer the raw HR attrition frame.

    Buckets the numeric columns (age, distance from home, monthly income,
    number of companies worked) into labelled ranges, one-hot encodes the
    categorical columns, and drops identifier / already-bucketed columns.

    Args:
        data: raw input DataFrame containing the Chinese-named columns
            referenced below (年龄, 出差情况, 距离（家）, ...).

    Returns:
        A new DataFrame of dummy-encoded features; the input is not modified.
    """
    df = data.copy()

    # Age buckets. include_lowest=True keeps boundary values (e.g. age
    # exactly 18) instead of silently turning them into NaN, which would
    # encode as an all-zero dummy row.
    bins = [18, 25, 30, 35, 40, 45, 50, 60]
    labels = ['18-25', '26-30', '31-35', '36-40', '41-45', '46-50', '51+']
    df['年龄分组'] = pd.cut(df['年龄'], bins=bins, labels=labels,
                        include_lowest=True)
    df = pd.get_dummies(df, columns=['年龄分组'])

    # Business-travel one-hot
    df = pd.get_dummies(df, columns=['出差情况'])

    # Distance-from-home buckets (include the 0 boundary)
    bins_distance = [0, 5, 10, 15, 20, 25]
    labels_distance = ['0-5', '6-10', '11-15', '16-20', '21+']
    df['离家距离分组'] = pd.cut(df['距离（家）'], bins=bins_distance,
                          labels=labels_distance, include_lowest=True)
    df = pd.get_dummies(df, columns=['离家距离分组'])

    # Education one-hot
    df = pd.get_dummies(df, columns=['教育情况'])

    # Job-role one-hot
    df = pd.get_dummies(df, columns=['工作角色'])

    # Marital-status one-hot
    df = pd.get_dummies(df, columns=['婚姻状况'])

    # Monthly-income buckets (include the 0 boundary)
    bins_monthly_income = [0, 2000, 5000, 10000, 15000, 20000, 25000]
    labels_monthly_income = ['0-2000', '2001-5000', '5001-10000',
                             '10001-15000', '15001-20000', '20001+']
    df['月收入分组'] = pd.cut(df['月收入'], bins=bins_monthly_income,
                         labels=labels_monthly_income, include_lowest=True)
    df = pd.get_dummies(df, columns=['月收入分组'])

    # Companies-worked buckets (include 0 so "never changed company"
    # rows are not dropped to NaN)
    bins_working_company = [0, 1, 3, 8]
    labels_working_company = ['0', '1-3', '4+']
    df['工作公司数量分组'] = pd.cut(df['工作的公司数量'], bins=bins_working_company,
                            labels=labels_working_company, include_lowest=True)
    df = pd.get_dummies(df, columns=['工作公司数量分组'])

    # Overtime one-hot
    df = pd.get_dummies(df, columns=['加班'])

    # Drop identifiers, constants, and the raw columns that were bucketed
    # above — one vectorized call; errors='ignore' preserves the original
    # tolerance for columns that are absent from the input.
    drop_cols = ['部门', '距离（家）', '部门.1', '员工编号', 'Over18', '加薪百分比',
                 '性别', '月收入', '绩效评级', '年龄', '工作的公司数量']
    return df.drop(columns=drop_cols, errors='ignore')


# Feature-consistency helper
def align_features(test_df, train_columns):
    """Return *test_df* aligned to the training feature set.

    Columns listed in *train_columns* but missing from *test_df* are added
    and filled with 0; columns not in *train_columns* are dropped; the
    result uses the training column order.

    Args:
        test_df: feature DataFrame produced for the test split.
        train_columns: ordered list of column names seen during training.

    Returns:
        A new DataFrame — unlike a per-column ``test_df[col] = 0`` loop,
        ``reindex`` does not mutate the caller's DataFrame.
    """
    return test_df.reindex(columns=train_columns, fill_value=0)


# Main program
def main():
    """End-to-end pipeline: load data, engineer features, train an
    XGBoost classifier with class weighting, evaluate, and save the
    per-row predictions to CSV."""
    # 1. Load the GBK-encoded train/test files.
    raw_train = pd.read_csv("../data/train.csv", encoding='GBK')
    raw_test = pd.read_csv("../data/test2.csv", encoding='GBK')

    print(f"训练集原始大小: {raw_train.shape}, 测试集原始大小: {raw_test.shape}")

    # 2. Split off the attrition target column.
    target = '是否减员'
    features_train = raw_train.drop(target, axis=1)
    labels_train = raw_train[target]
    features_test = raw_test.drop(target, axis=1)
    labels_test = raw_test[target]

    # 3-4. Feature-engineer both splits, then align the test columns to
    # the training feature set.
    fe_train = tezhen(features_train)
    feature_columns = fe_train.columns.tolist()
    fe_test = align_features(tezhen(features_test), feature_columns)

    print(f"\n特征工程后训练集大小: {fe_train.shape}")
    print(f"特征工程后测试集大小: {fe_test.shape}")

    # 5. Class-imbalance ratio (negatives per positive) feeds
    # scale_pos_weight below.
    negatives = int((labels_train == 0).sum())
    positives = int((labels_train == 1).sum())
    class_ratio = negatives / positives
    print(f"\n类别比例（在职:离职）: {class_ratio}:1")

    # 6. Train the model.
    model = XGBClassifier(
        use_label_encoder=False,
        eval_metric='logloss',
        random_state=42,
        scale_pos_weight=class_ratio,
        max_depth=5,
        learning_rate=0.1,
        subsample=0.8,
        colsample_bytree=0.8,
        n_estimators=300,
    )
    model.fit(fe_train, labels_train)

    # 7. Predict and evaluate on the hold-out split.
    predictions = model.predict(fe_test)
    probabilities = model.predict_proba(fe_test)[:, 1]

    banner = "=" * 50
    print("\n" + banner)
    print("模型评估结果")
    print(banner)
    print(f"准确率: {accuracy_score(labels_test, predictions):.4f}")
    print(f"AUC: {roc_auc_score(labels_test, probabilities):.4f}")

    print("\n分类报告:")
    print(classification_report(labels_test, predictions))

    # Persist per-row predictions alongside the raw test features.
    report = features_test.copy()
    report['离职预测概率'] = probabilities
    report['离职预测'] = predictions
    report['实际离职'] = labels_test

    report.to_csv("./data/预测结果.csv", index=False, encoding='GBK')
    print("\n预测结果已保存至: ./data/预测结果.csv")


# Run the pipeline only when executed as a script (not on import).
if __name__ == "__main__":
    main()