#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@FileName: train.py
@Author: Hugo Wang
@Date: 2025-06-06 11:07
@Project: company_talent_loss
"""
import datetime
import os.path
import pandas as pd
import numpy as np
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler  # 特征处理
import xgboost as xgb
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score, roc_auc_score
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import StratifiedKFold
from sklearn.model_selection import GridSearchCV
from utils.log import Logger
import joblib


class TalentLoss:
    """Build and evaluate employee-attrition ("talent loss") models.

    Loads the training CSV, preprocesses the features, then trains a
    decision tree and a grid-searched XGBoost classifier.
    """

    def __init__(self, path):
        """Set up a timestamped run logger and load the raw training data.

        :param path: path to the training CSV file.
        """
        # Timestamped log file name so each training run gets its own log.
        logfile_name = 'train_' + datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', logfile_name).get_logger()

        self.logfile.info("开始创建人才流失模型的对象模型")
        # Raw training data; consumed later by pre_data().
        self.data_source = pd.read_csv(path)

    def pre_data(self, dataSource):
        """Select the modeling columns and encode the categorical features.

        :param dataSource: raw DataFrame read from the training CSV.
        :return: DataFrame with binary-mapped and one-hot-encoded features.
        """
        # .copy() so the assignments below write to an independent frame
        # instead of a view of the caller's data (the original triggered
        # pandas' SettingWithCopyWarning via chained assignment).
        df = dataSource[
            ['Age', 'Department', 'DistanceFromHome', 'Education', 'EnvironmentSatisfaction', 'Gender', 'JobLevel',
             'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked', 'OverTime', 'PercentSalaryHike',
             'PerformanceRating', 'RelationshipSatisfaction', 'StockOptionLevel', 'TotalWorkingYears',
             'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion',
             'YearsWithCurrManager', 'Attrition']].copy()
        # Binary-encode marital status: Divorced/Single -> 0, everything else -> 1.
        df['MaritalStatus'] = df['MaritalStatus'].apply(lambda x: 0 if x in ['Divorced', 'Single'] else 1)
        # Binary-encode department: Sales/Human Resources -> 0, other departments -> 1.
        df['Department'] = df['Department'].apply(lambda x: 0 if x in ['Sales', 'Human Resources'] else 1)
        # One-hot encode the remaining object-typed columns (Gender, OverTime).
        # NOTE(review): this assumes 'Attrition' is numeric in train.csv — if it
        # is a Yes/No string, get_dummies would split it into Attrition_Yes /
        # Attrition_No and produce_model's ['Attrition'] lookup would fail; verify.
        df2 = pd.get_dummies(df)
        # Drop the redundant complement columns produced by one-hot encoding
        # (Gender_Female / OverTime_Yes already carry the same information).
        df3 = df2.drop(['Gender_Male', 'OverTime_No'], axis=1)
        return df3

    def produce_model(self, processedData, logger):
        """Train and evaluate a decision tree and a grid-searched XGBoost model.

        :param processedData: DataFrame returned by pre_data().
        :param logger: run logger (used when model persistence is enabled).
        """
        # 2.2 Feature columns and label from the preprocessed data.
        x = processedData[
            ['Age', 'EnvironmentSatisfaction', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome',
             'NumCompaniesWorked',
             'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction', 'StockOptionLevel',
             'TotalWorkingYears',
             'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion', 'Gender_Female',
             'OverTime_Yes']]
        y = processedData['Attrition']
        # Encode the label into consecutive integers (0..n_classes-1).
        le = LabelEncoder()
        y = le.fit_transform(y)
        # 2.3 Stratified split keeps the class ratio identical in train and test.
        x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=100, stratify=y)
        # Standardize the features; fit on the training set only to avoid leakage.
        transfer = StandardScaler()
        x_train = transfer.fit_transform(x_train)
        x_test = transfer.transform(x_test)
        # Model 1: decision tree.
        dfc = DecisionTreeClassifier(max_depth=8, random_state=45)
        dfc.fit(x_train, y_train)
        y_pred = dfc.predict(x_test)
        print(f"准确率：{accuracy_score(y_test, y_pred)}")
        # AUC must be computed from scores, not hard labels: use the
        # positive-class probability (the original passed predict() output,
        # which degenerates AUC to a single operating point).
        print(f"AUC值：{roc_auc_score(y_test, dfc.predict_proba(x_test)[:, 1])}")
        print("==============================================")
        # Model persistence (disabled for now).
        # joblib.dump(dfc,'../model/2025060612_dtcModel.pkl')
        # logger.info(f"模型保存的绝对路径是：{os.path.abspath('../model/2025060612_dtcModel.pkl')}")

        # Model 2: XGBoost. Attrition is a binary label, so use the binary
        # logistic objective (the original multi:softmax + num_class setup is
        # meant for multi-class and returns class indices, not probabilities).
        xgbModel = xgb.XGBClassifier(n_estimators=50, objective='binary:logistic', eval_metric='logloss',
                                     eta=0.1, random_state=45)

        # 3 Stratified K-fold so every CV fold keeps the class ratio.
        spliter = StratifiedKFold(n_splits=5, shuffle=True, random_state=45)
        # 3.1 Hyper-parameter grid. The original n_estimators grid was
        # np.arange(50, 100, 150), whose step exceeds the span and yields only
        # [50]; search 50/100/150 as evidently intended.
        param_grid = {'max_depth': np.arange(3, 5, 1), 'n_estimators': np.arange(50, 151, 50),
                      'eta': np.arange(0.4, 1, 0.3)}
        model = GridSearchCV(estimator=xgbModel, param_grid=param_grid, cv=spliter)
        model.fit(x_train, y_train)
        # 3.2 Evaluate the best estimator found by the grid search.
        y_pred = model.predict(x_test)
        print(f"准确率：{accuracy_score(y_test, y_pred)}")
        print(f"AUC值：{roc_auc_score(y_test, model.predict_proba(x_test)[:, 1])}")
        print(f"最好的参数组合：{model.best_params_}")
        # 3.3 Model persistence (disabled for now).
        # joblib.dump(model,'../model/2025060612_xgbModel.pkl')
        # logger.info(f"模型保存的绝对路径是：{os.path.abspath('../model/2025060612_xgbModel.pkl')}")


if __name__ == '__main__':
    # Entry point: build the model object, preprocess the data, train models.
    talent_loss = TalentLoss('../data/train.csv')
    # 1. Raw training data, loaded by the constructor.
    raw_frame = talent_loss.data_source
    # 2. Basic preprocessing of the raw data (column selection + encoding).
    prepared_frame = talent_loss.pre_data(raw_frame)
    # 3. Train and evaluate the models, logging to the run's log file.
    talent_loss.produce_model(prepared_frame, talent_loss.logfile)