# 导包
import joblib
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold
from sklearn.utils.class_weight import compute_sample_weight
from xgboost import XGBClassifier
import seaborn as sns

# Configure matplotlib for CJK output: SimHei supplies the Chinese glyphs,
# and disabling unicode_minus keeps the minus sign renderable under SimHei
# (otherwise it shows as a missing-glyph box).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

def ana_data(data):
    """Exploratory visualisation of the raw dataset.

    Produces two bar charts saved under ``../result``: per-column
    missing-value counts and per-column distinct-value counts, then shows
    the figures.

    NOTE(review): the chart labels say "异常点" (outliers), but the plotted
    values are ``isnull()`` counts, i.e. missing values — confirm intent.

    :param data: raw DataFrame; a deep copy is taken, the input is untouched.
    """
    data = data.copy(deep=True)

    # Quick inspection helpers, kept for reference:
    # print(f'数据整体情况:\n{data.info}')
    # print(f'数据前五列:\n{data.head()}')
    # print(f'数据列表名字:\n{data.columns}')

    # --- chart 1: missing values per column ---
    null_counts = data.isnull().sum()
    plt.figure(figsize=(10, 6))
    plt.bar(null_counts.index, null_counts.values)
    plt.xticks(rotation=90)  # rotate so column names do not overlap
    plt.ylabel('异常点数量')
    plt.title('数据集中各列的异常点数量')
    plt.tight_layout()
    plt.savefig('../result/各个特征的异常点数量.png')

    # --- chart 2: distinct values per column ---
    unique_counts = data.nunique()
    plt.figure(figsize=(12, 6))
    bars = plt.bar(unique_counts.index, unique_counts.values, color='skyblue')
    # Annotate each bar with its count, centred just above the top.
    for rect in bars:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2.0, height + 0.5,
                 int(height), ha='center', va='bottom', fontsize=10)
    plt.xticks(rotation=90)
    plt.ylabel('唯一值数量')
    plt.title('数据集中各特征的类别数量', fontsize=14)
    # Hide the top/right spines for a cleaner look.
    axes = plt.gca()
    for side in ('top', 'right'):
        axes.spines[side].set_visible(False)
    plt.tight_layout()
    plt.savefig('../result/各个特征的种类数量.png')

    plt.show()
def plot_correlation_heatmap(data, save_path='../result/correlation_heatmap.png'):
    """Render, save and display a feature-correlation heatmap.

    :param data: DataFrame after feature engineering (expected to be fully
                 numeric so ``corr()`` covers every column).
    :param save_path: path the PNG figure is written to.
    """
    plt.figure(figsize=(14, 12))
    correlations = data.corr()  # pairwise Pearson correlation matrix

    # Annotated seaborn heatmap; small fonts keep many features readable.
    sns.heatmap(correlations, annot=True, fmt=".2f", cmap='coolwarm', square=True,
                cbar_kws={"shrink": .8}, linewidths=.5, annot_kws={"size": 8})

    plt.title('特征相关性热力图', fontsize=16)
    plt.xticks(rotation=45, fontsize=8)
    plt.yticks(rotation=45, fontsize=8)
    plt.tight_layout()
    plt.savefig(save_path)
    plt.show()

# 特征工程
def feature_engineering(data):
    """Encode the raw HR dataframe into a fully numeric one.

    Steps:
      1. drop non-informative columns (``Over18``, ``StandardHours``,
         ``EmployeeNumber``);
      2. ordinal-encode ``EducationField`` / ``JobRole`` / ``MaritalStatus``
         by the sorted order of their distinct values (the same mapping
         sklearn's ``LabelEncoder`` produces);
      3. map ``BusinessTravel`` and ``Department`` through fixed dictionaries;
      4. one-hot encode ``Gender`` and ``OverTime``, dropping the first level.

    :param data: raw DataFrame; a deep copy is taken, the input is untouched.
    :return: encoded DataFrame.
    """
    data = data.copy(deep=True)

    # 1. Remove columns carrying no signal / identifiers.
    data = data.drop(['Over18', 'StandardHours', 'EmployeeNumber'], axis=1)

    # 2. Ordinal encoding: 0..k-1 following sorted distinct values
    #    (equivalent to LabelEncoder.fit_transform on each column).
    for column in ('EducationField', 'JobRole', 'MaritalStatus'):
        codes = {value: idx
                 for idx, value in enumerate(sorted(data[column].unique()))}
        data[column] = data[column].map(codes)

    # 3. Hand-written mappings preserve a meaningful ordering.
    data['BusinessTravel'] = data['BusinessTravel'].map(
        {'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2})
    data['Department'] = data['Department'].map(
        {'Human Resources': 1, 'Research & Development': 2, 'Sales': 3})

    # 4. Binary categoricals become a single indicator column each.
    return pd.get_dummies(data, columns=['Gender', 'OverTime'], drop_first=True)


# 数据集划分
def train_test_sp(data):
    """Split the dataset 80/20, stratified on the target.

    Assumes the first column is the label and every remaining column is a
    feature.

    :param data: fully numeric DataFrame (target first).
    :return: tuple ``(x_train, y_train, x_test, y_test)`` — note the order
             differs from sklearn's usual ``(x_train, x_test, ...)``.
    """
    features = data.iloc[:, 1:]
    target = data.iloc[:, 0]
    x_tr, x_te, y_tr, y_te = train_test_split(
        features, target, test_size=0.2, random_state=88, stratify=target)
    return x_tr, y_tr, x_te, y_te


def train_grid_search(x_train, y_train):
    """Grid-search XGBoost hyper-parameters with 5-fold stratified CV.

    Scores by ROC-AUC and compensates class imbalance through 'balanced'
    per-sample weights (the attrition target is skewed).

    :param x_train: training features.
    :param y_train: training labels.
    :return: dict of the best hyper-parameters found (also printed).
    """
    param_grid = {
        'n_estimators': list(range(260, 281)),
        'max_depth': [1, 2, 3],
        # 0.20 .. 0.30 in 0.01 steps; round() avoids float-accumulation noise.
        'learning_rate': [round(0.20 + 0.01 * i, 2) for i in range(11)],
    }
    # Rare class gets a proportionally larger weight during fitting.
    sample_weight = compute_sample_weight('balanced', y_train)
    skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=88)
    gsc = GridSearchCV(estimator=XGBClassifier(), param_grid=param_grid,
                       cv=skf, scoring='roc_auc')
    gsc.fit(x_train, y_train, sample_weight=sample_weight)
    print(f"最佳参数best_params:{gsc.best_params_}")
    print(f"最佳评分best_score:{gsc.best_score_}")
    # Return the result instead of discarding it, so callers can feed the
    # winning parameters straight into model_train.
    return gsc.best_params_


def model_train(x_train, y_train, x_test, y_test,
                n_estimators=275, max_depth=1, learning_rate=0.27):
    """Train the final XGBoost model, report test ROC-AUC and persist it.

    Hyper-parameters default to the best combination found previously via
    ``train_grid_search``; exposing them as keyword arguments lets other
    combinations be tried without editing the function body (previous
    candidates lived here as commented-out code).

    :param x_train: training features.
    :param y_train: training labels.
    :param x_test: held-out test features.
    :param y_test: held-out test labels.
    :return: ROC-AUC of the trained model on the test set.
    """
    # Compensate the imbalanced attrition classes with per-sample weights.
    sample_weight = compute_sample_weight('balanced', y_train)
    xgb_model = XGBClassifier(n_estimators=n_estimators, max_depth=max_depth,
                              learning_rate=learning_rate)
    xgb_model.fit(x_train, y_train, sample_weight=sample_weight)
    # Positive-class probabilities give a threshold-free AUC metric.
    y_score = xgb_model.predict_proba(x_test)[:, 1]
    auc = roc_auc_score(y_test, y_score)
    print(f'roc_auc_score:{auc}')
    joblib.dump(xgb_model, '../model/xgb.pkl')
    return auc


if __name__ == '__main__':
    # Load the raw training data.
    data = pd.read_csv('../../data/train.csv')
    # Exploratory analysis (disabled by default).
    # ana_data(data)
    data = feature_engineering(data)
    plot_correlation_heatmap(data)
    x_train, y_train, x_test, y_test = train_test_sp(data)
    # Hyper-parameter search (disabled; chosen values are model_train's defaults).
    # train_grid_search(x_train, y_train)
    model_train(x_train, y_train, x_test, y_test)
