import numpy,pandas
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import accuracy_score, classification_report, roc_auc_score
from sklearn.linear_model import LogisticRegression
import pandas as pd
from xgboost import XGBClassifier, XGBRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import accuracy_score, classification_report
import matplotlib.pyplot as plt
from sklearn.metrics import confusion_matrix

# 进行热编码
# 数据无异常，无空值
# 首先看年龄对离职的影响
import pandas as pd
import numpy as np
#年龄分组
def _quantile_bin(series, n_bins=4):
    """Bin a numeric Series into ``n_bins`` equal-frequency groups.

    Bin edges are the empirical quantiles of *series*; labels are
    ``Q1`` .. ``Qn``. ``include_lowest=True`` keeps the minimum value
    inside the first bin. Extracted because the original repeated this
    exact quantile/labels/pd.cut sequence three times.
    """
    quantiles = np.linspace(0, 1, n_bins + 1)
    bin_edges = np.quantile(series.values, quantiles)
    labels = [f'Q{i + 1}' for i in range(n_bins)]
    return pd.cut(series, bins=bin_edges, labels=labels, include_lowest=True)


def tezhen(data):
    """Feature engineering for the attrition dataset.

    Takes the raw DataFrame (GBK CSV with Chinese column names), adds
    quantile/fixed-bin groupings and one-hot encodings, derives a
    within-(level, department) relative-salary feature, drops columns
    judged uninformative, and returns the transformed DataFrame.

    Side effects: prints the overtime-vs-attrition rates, a separator,
    ``data.info()`` and the first 6 rows (kept from the original for
    parity with existing console output).
    """
    # Age: quartile bins.
    data['年龄分组'] = _quantile_bin(data['年龄'])

    # One-hot encode business-travel frequency.
    data = pd.get_dummies(data, columns=['出差情况'])

    # Distance from home: quartile bins.
    data['距离分组'] = _quantile_bin(data['距离（家）'])

    # One-hot encode education level.
    data = pd.get_dummies(data, columns=['教育情况'])

    # Gender attrition rates were ~0.15 vs ~0.16 (checked offline), so
    # gender is dropped at the end rather than encoded.
    # Job role looked predictive -> keep it, one-hot encoded.
    data = pd.get_dummies(data, columns=['工作角色'])
    # Marital status looked predictive -> keep it, one-hot encoded.
    data = pd.get_dummies(data, columns=['婚姻状况'])

    # Monthly income: fixed bins, then one-hot.
    bins_monthly_income = [0, 2000, 5000, 10000, 15000, 20000, 25000]
    labels_monthly_income = ['0-2000', '2001-5000', '5001-10000', '10001-15000', '15001-20000', '20001+']
    data['月收入分组'] = pd.cut(data['月收入'], bins=bins_monthly_income, labels=labels_monthly_income)
    data = pd.get_dummies(data, columns=['月收入分组'])

    # Number of companies worked for: fixed bins, then one-hot.
    bins_working_company = [0, 1, 3, 8]
    labels_working_company = ['0', '1-3', '4+']
    data['工作公司数量分组'] = pd.cut(data['工作的公司数量'], bins=bins_working_company, labels=labels_working_company)
    data = pd.get_dummies(data, columns=['工作公司数量分组'])

    # Inspect overtime vs attrition rate, then one-hot encode overtime.
    print(data.groupby('加班')['是否减员'].mean())
    data = pd.get_dummies(data, columns=['加班'])

    # Relative salary: monthly income as a percentage of the mean income
    # of the same (level, department) group.
    data['级别平均工资'] = data.groupby(['级别', '部门'])['月收入'].transform('mean')
    data['同组薪资水平'] = data['月收入'] / data['级别平均工资'] * 100
    # Relative salary: quartile bins (kept as a categorical column,
    # not one-hot encoded — same as the original behavior).
    data['薪资水平分组'] = _quantile_bin(data['同组薪资水平'])

    print('___________________')
    # Drop identifiers and features judged uninformative
    # (e.g. 绩效评级 was checked offline and left out for now).
    data = data.drop(['部门', '部门.1', '员工编号',
                      'Over18', '性别',
                      '加薪百分比', '绩效评级'], axis=1)

    data.info()
    print(data.head(6))
    return data
# 查看‘绩效评级’对离职率影响
# TODO 暂定不要 print(data.groupby('绩效评级')['是否减员'].mean())
# TODO 特征工程完成开始用模型XGDboost


# Load the train / test CSVs (GBK-encoded exports).
data = pd.read_csv("./data/train.csv", encoding='GBK')
data_test = pd.read_csv("./data/test2.csv", encoding='GBK')

# Feature engineering, applied independently to each split.
data_tezhen = tezhen(data)
data_test_tezhen = tezhen(data_test)

# Split off the target column '是否减员' (attrition flag).
X_train = data_tezhen.drop('是否减员', axis=1)
y_train = data_tezhen['是否减员']
X_test = data_test_tezhen.drop('是否减员', axis=1)
y_test = data_test_tezhen['是否减员']

# Fix: running get_dummies separately on train and test can produce
# different column sets/orders (categories missing from one split).
# Align the test matrix to the training columns; categories unseen in
# the test split become all-zero indicator columns.
X_test = X_test.reindex(columns=X_train.columns, fill_value=0)
# 目标列


# Baseline classifier: shallow gradient-boosted trees, histogram method
# on GPU, with native categorical-column support enabled.
xgb_params = dict(
    eval_metric='logloss',
    random_state=42,
    max_depth=2,
    enable_categorical=True,
    tree_method='hist',
    device='cuda',
)
model = XGBClassifier(**xgb_params)

# Fit on the training split.
model.fit(X_train, y_train)

# Hard-label predictions on the held-out test split.
y_pred = model.predict(X_test)

# ROC curve of the trained model on the held-out test split.

from sklearn.metrics import RocCurveDisplay

roc_display = RocCurveDisplay.from_estimator(model, X_test, y_test)
roc_display.ax_.set_title("ROC Curve")
# Save first: plt.show() clears the current figure afterwards.
plt.savefig("./data/ROC.jpg")
plt.show()

# ===== Evaluate the depth-2 baseline trained above =====
cm = confusion_matrix(y_test, y_pred)
plt.matshow(cm)  # quick visual of the confusion matrix
# 准确率 (accuracy)
print("准确率：", accuracy_score(y_test, y_pred))
# 分类报告 (per-class precision / recall / F1)
print(classification_report(y_test, y_pred))
# AUC from the predicted positive-class probabilities
print("AUC:", roc_auc_score(y_test, model.predict_proba(X_test)[:, 1]))

# ===== Sweep max_depth (1..18) and record the test AUC for each =====
depth_range = range(1, 19)
results = []

for depth in depth_range:
    model = XGBClassifier(
        eval_metric='logloss',
        random_state=42,
        max_depth=depth,
        enable_categorical=True,
        tree_method='hist',
        device='cuda'
    )

    # Train this depth's model on the training split.
    model.fit(X_train, y_train)
    # Positive-class probabilities on the test split.
    y_pred_proba = model.predict_proba(X_test)[:, 1]

    auc = roc_auc_score(y_test, y_pred_proba)

    # Record the result for this depth.
    results.append({
        'depth': depth,
        'auc': auc,
    })

    print(f"Depth: {depth}, AUC: {auc}")

# Bug fix: the DataFrame used to be built BEFORE the loop ran, so it was
# always empty. Build it after the sweep so it actually holds the results.
results_df = pd.DataFrame(results)

# Figure for visualizing the sweep results.
plt.figure(figsize=(12, 5))

# 换一个模型，随机森林

# 放在文件顶部

# 定义模型函数
def sui(X_train, X_test, y_train, y_test):
    """Train a random-forest baseline and print accuracy, report and AUC.

    Fits a 100-tree RandomForestClassifier (fixed seed) on the training
    split, then prints test accuracy, the classification report and the
    ROC AUC computed from positive-class probabilities.
    """
    forest = RandomForestClassifier(n_estimators=100, random_state=42)
    forest.fit(X_train, y_train)

    # Hard labels and positive-class probabilities on the test split.
    predictions = forest.predict(X_test)
    positive_scores = forest.predict_proba(X_test)[:, 1]

    # Report the three metrics on stdout.
    print("准确率：", accuracy_score(y_test, predictions))
    print(classification_report(y_test, predictions))
    print("AUC:", roc_auc_score(y_test, positive_scores))

# 调用函数
#直接使用逻辑回归模型
def log(X_train, X_test, y_train, y_test):
    """Train a logistic-regression baseline and print its test accuracy.

    Bug fix: the original predicted and scored the module-level XGBoost
    ``model`` instead of the freshly trained ``model_log``, so the
    printed accuracy never reflected the logistic regression. The unused
    ``y_predict`` local is dropped along with the fix.
    """
    model_log = LogisticRegression(solver='liblinear', max_iter=1000)
    model_log.fit(X_train, y_train)
    print(f"准确率:{model_log.score(X_test, y_test)}")

# sui(X_train, X_test, y_train, y_test)
# log(X_train, X_test, y_train, y_test)








