"""
人才流失  训练脚本开发
"""

# 1.导包
import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime

from imblearn.over_sampling import SMOTE

from utils.log import Logger
from xgboost import XGBRegressor, XGBClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error, root_mean_squared_error, accuracy_score
from sklearn.metrics import confusion_matrix, precision_score, recall_score, f1_score, roc_auc_score
import joblib
from sklearn.preprocessing import StandardScaler
import lightgbm as lgb

# Matplotlib setup: SimHei so the Chinese chart labels render; larger base font.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15

# Below: the attrition model class, which configures logging and loads the data source.


class AttritionModel:
    """Attrition-model context: owns the run logger and the raw training data.

    Instantiating the class creates a timestamped log file (via the project
    ``Logger`` helper) and reads the training CSV into ``self.data_source``.
    """

    def __init__(self, path):
        """Set up logging and load the data source.

        :param path: path of the training CSV file
        """
        # Timestamp suffix keeps each training run's log file unique.
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', f'ten_train_{stamp}').get_logger()
        # Raw data frame; downstream steps must copy before mutating.
        self.data_source = pd.read_csv(path)


def ana_data(am: AttritionModel):
    """
    Exploratory overview of the data set: draws a pie chart of the churn /
    retention share and a bar chart of the absolute head counts, saves the
    figure to disk and shows it.

    :param am: attrition model context providing ``data_source``
    :return: None
    """
    # Work on a copy so the shared data source stays untouched.
    df = am.data_source.copy()

    # Head counts for the two Attrition classes (1 = left, 0 = stayed).
    n_lost = len(df[df['Attrition'] == 1])
    n_stayed = len(df[df['Attrition'] == 0])

    fig = plt.figure(figsize=(5, 10))

    # Subplot 1: class share as a pie chart.
    pie_ax = fig.add_subplot(211)
    pie_ax.pie([n_lost, n_stayed], labels=['流失比例', '不流失比例'], autopct='%1.1f%%')
    pie_ax.set_title('人才流失情况占比图')

    # Subplot 2: absolute head counts as a bar chart.
    bar_ax = fig.add_subplot(212)
    bar_ax.bar(['流失', '不流失'], [n_lost, n_stayed])
    bar_ax.set_title('人才流失情况柱状图')

    # Persist the figure, then display it interactively.
    # NOTE(review): assumes ../data/fig/ already exists — confirm, or savefig raises.
    plt.savefig("../data/fig/人才流失情况_训练.png")
    plt.show()


def data_processing(am: AttritionModel):
    """
    Basic preprocessing: drop columns with no predictive value.

    :param am: attrition model context providing ``data_source``
    :return: cleaned copy of the data set
    """
    df = am.data_source.copy()
    # EmployeeNumber is an identifier; Over18 / StandardHours are presumably
    # constant in this data set — TODO confirm against the source CSV.
    irrelevant = ['EmployeeNumber', 'Over18', 'StandardHours']
    return df.drop(irrelevant, axis=1)


def feature_engineering(data):
    """
    Feature engineering: one-hot encode every categorical column.

    A number of hand-crafted interaction features (age buckets, income/tenure
    ratios, field-department match, ...) were trialled here and disabled; only
    the dummy encoding is currently active.

    :param data: preprocessed feature frame
    :return: frame with categorical columns expanded into 0/1 dummy columns
    """
    # dtype=int yields 0/1 integer dummy columns instead of booleans.
    encoded = pd.get_dummies(data, dtype=int)
    return encoded


def model_train(data, logger):
    """
    Train, evaluate and persist the attrition classifier (LightGBM).

    The trained model is saved to ../model/xgb_20251026.pkl.

    :param data: fully encoded feature frame containing the 'Attrition' label column
    :param logger: logger used for training/evaluation output
    :return: None
    """
    # 1. Split label and features.
    y = data['Attrition']
    x = data.drop('Attrition', axis=1)

    # 2. Stratified hold-out split keeps the (imbalanced) class ratio in both sets.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=11, stratify=y
    )

    # 3. LightGBM native datasets.
    train_data = lgb.Dataset(x_train, label=y_train)
    test_data = lgb.Dataset(x_test, label=y_test)

    # Hyper-parameters; an earlier grid search suggested
    # {'learning_rate': 0.05, 'max_depth': 3, 'n_estimators': 100}.
    params = {
        'boosting_type': 'dart',
        'objective': 'binary',
        'metric': 'auc',
        'num_leaves': 31,
        'learning_rate': 0.05,
        'feature_fraction': 0.7
    }

    model = lgb.train(params, train_data, valid_sets=[test_data], num_boost_round=57)

    # 4. Evaluation. Keep the raw scores for ROC-AUC (thresholded labels
    # would misstate it), threshold only for the label-based metrics.
    y_score = model.predict(x_test)
    # 0.22 threshold trades precision for recall on the minority (churn) class.
    y_pred = np.where(y_score > 0.22, 1, 0)

    # Route metrics through the provided logger (was print) so they land in
    # the run's log file together with the rest of the output.
    logger.info(f"准确率：{accuracy_score(y_test, y_pred)}")
    logger.info(f"精确率：{precision_score(y_test, y_pred)}")
    logger.info(f"召回率：{recall_score(y_test, y_pred)}")
    logger.info(f"f1-score：{f1_score(y_test, y_pred)}")
    logger.info(f"AUC：{roc_auc_score(y_test, y_score)}")

    # 5. Persist the trained model.
    # NOTE(review): file name says 'xgb' but the model is LightGBM — consider
    # renaming; kept as-is so existing consumers of the path keep working.
    model_path = "../model/xgb_20251026.pkl"
    joblib.dump(model, model_path)
    # Path hoisted into a variable: nesting double quotes inside a
    # double-quoted f-string is a SyntaxError on Python < 3.12.
    logger.info(f"模型保存成功，保存路径{os.path.abspath(model_path)}")


if __name__ == '__main__':
    # 1. Build the model context: logger + raw CSV data source.
    am = AttritionModel(r'../data/train.csv')

    # 2. Exploratory data analysis (pie/bar charts) — disabled by default.
    # ana_data(am)

    # 3. Preprocessing: drop non-informative columns.
    dataset = data_processing(am)

    # 4. Feature engineering: one-hot encode the categorical columns.
    dataset = feature_engineering(dataset)

    # 5. Train, evaluate and persist the model.
    model_train(dataset, am.logfile)
