import os
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_auc_score, make_scorer, f1_score
from xgboost import XGBClassifier
import lightgbm as lgb
from sklearn.preprocessing import StandardScaler
from imblearn.over_sampling import SMOTE
from my_utils.log import Logger
import joblib

# Use SimHei so Chinese characters in plot labels render correctly,
# and bump the base font size for readability.
plt.rcParams.update({'font.family': 'SimHei', 'font.size': 15})


# 1. Talent-attrition model holder: configures logging and loads the data source.
class BrainDrainModel:
    """Container for the attrition-model run: a timestamped logger and the raw data."""

    def __init__(self, path):
        """Create a run-scoped logger and read the training CSV at *path*."""
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', f'train_{stamp}').get_logger()
        self.data_source = pd.read_csv(path)


# 2. Feature engineering
def feature_engineering(data, logger):
    """Build the model feature matrix from the raw attrition data.

    Parameters
    ----------
    data : pd.DataFrame
        Raw training data (IBM-attrition style); must contain 'Age' plus the
        identifier/constant columns 'EmployeeNumber', 'Over18', 'StandardHours'.
    logger : logging.Logger
        Currently unused; kept so the signature matches the training functions.

    Returns
    -------
    tuple[pd.DataFrame, list[str]]
        The engineered frame and the ordered list of feature column names.
    """
    feature_data = data.copy()
    # Drop the row identifier and the constant columns — they carry no signal.
    feature_data.drop(['EmployeeNumber', 'Over18', 'StandardHours'], axis=1, inplace=True)
    # One-hot encode every remaining categorical (object) column.
    feature_data = pd.get_dummies(feature_data)
    # Bucket age: (0, 35] -> 0, (35, 45] -> 1, (45, 100] -> 2.
    feature_data['AgeGroup'] = pd.cut(data['Age'], bins=[0, 35, 45, 100], labels=[0, 1, 2]).astype(int)
    feature_columns = ['Age','DistanceFromHome', 'Education',
       'EnvironmentSatisfaction', 'JobInvolvement', 'JobLevel',
       'JobSatisfaction', 'MonthlyIncome', 'NumCompaniesWorked',
       'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction',
        'StockOptionLevel', 'TotalWorkingYears',
       'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany',
       'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager',
       'BusinessTravel_Non-Travel', 'BusinessTravel_Travel_Frequently',
       'BusinessTravel_Travel_Rarely', 'Department_Human Resources',
       'Department_Research & Development', 'Department_Sales',
       'EducationField_Human Resources', 'EducationField_Life Sciences',
       'EducationField_Marketing', 'EducationField_Medical',
       'EducationField_Other', 'EducationField_Technical Degree',
       'Gender_Female', 'Gender_Male', 'JobRole_Healthcare Representative',
       'JobRole_Human Resources', 'JobRole_Laboratory Technician',
        'JobRole_Manufacturing Director',
       'JobRole_Research Director', 'JobRole_Research Scientist',
       'JobRole_Sales Executive', 'JobRole_Sales Representative',
       'MaritalStatus_Divorced', 'MaritalStatus_Married',
       'MaritalStatus_Single', 'OverTime_No', 'OverTime_Yes',
       'AgeGroup']
    return feature_data, feature_columns


# XGBoost training pipeline
def model_train_xgboost(data, features, logger):
    """Train, evaluate and persist an XGBoost attrition classifier.

    Parameters
    ----------
    data : pd.DataFrame
        Engineered frame containing *features* plus the 'Attrition' target.
    features : list[str]
        Column names to use as model inputs.
    logger : logging.Logger
        Receives grid-search params, test AUC and the save path.
    """
    # 1. Split features/target; stratify so both splits keep the class ratio.
    x = data[features]
    y = data['Attrition']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23, stratify=y)
    # Oversample the minority class on the TRAINING split only.
    smote = SMOTE(random_state=18)
    x_train, y_train = smote.fit_resample(x_train, y_train)
    # Fit the scaler on training data and apply the same transform to the
    # test data. BUGFIX: the original did `x_test = scaler.fit(x_test)`,
    # which both re-fits on the test set and assigns the scaler OBJECT
    # (not scaled data) to x_test, breaking predict_proba below.
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    # 2. Grid search with 5-fold cross-validation.
    param_grid = {
        'n_estimators': [250],
        'max_depth': [10],
        'learning_rate': [0.01]
    }
    model = GridSearchCV(XGBClassifier(random_state=18), param_grid, cv=5)
    model.fit(x_train, y_train)
    logger.info(f'网格化搜索参数：{model.best_params_}')
    # 3. Evaluate on the held-out test set with ROC-AUC (positive-class scores).
    y_score = model.predict_proba(x_test)[:, 1]
    logger.info(f"xgboost模型AUC:{roc_auc_score(y_test, y_score)}")
    # 4. Persist the fitted search object (carries the refit best estimator).
    model_path = "../model/xgboost_20251027.pkl"
    joblib.dump(model, model_path)
    logger.info(f"模型保存成功，保存路径{os.path.abspath(model_path)}")


# LightGBM training pipeline
def model_train_lightgbm(data, features, logger):
    """Train, evaluate and persist a LightGBM attrition classifier.

    Parameters
    ----------
    data : pd.DataFrame
        Engineered frame containing *features* plus the 'Attrition' target.
    features : list[str]
        Column names to use as model inputs.
    logger : logging.Logger
        Receives the test AUC and the save path.
    """
    # 1. Split features/target; stratify so both splits keep the class ratio.
    x = data[features]
    y = data['Attrition']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23, stratify=y)
    # Fit the scaler on the training split; reuse it for the test split.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    train_dataset = lgb.Dataset(x_train, label=y_train)
    test_dataset = lgb.Dataset(x_test, label=y_test, reference=train_dataset)
    # 2. Native-API training parameters (binary classification, AUC metric).
    params = {
        'objective': 'binary',
        'metric': 'auc',
        'learning_rate': 0.1,
        'num_leaves': 31,
        'max_depth': 4,
        'min_child_samples': 20,
        'feature_fraction': 0.8,
        'bagging_fraction': 0.8,
        'bagging_freq': 5,
        'verbose': -1,
        'random_state': 23
    }

    # NOTE(review): the test set drives early stopping AND the final AUC
    # below, so the reported AUC is likely optimistic — consider a separate
    # validation split.
    model = lgb.train(
        params,
        train_dataset,
        num_boost_round=500,
        valid_sets=[test_dataset],
        callbacks=[
            lgb.log_evaluation(50),  # log the eval metric every 50 rounds
            lgb.early_stopping(20)  # stop after 20 rounds without improvement
        ]
    )
    # 3. Booster.predict returns positive-class probabilities for 'binary'.
    y_score = model.predict(x_test)
    logger.info(f"lightgbm模型AUC:{roc_auc_score(y_test, y_score)}")
    # 4. Persist the trained booster.
    model_path = "../model/lightgbm_20251027.pkl"
    joblib.dump(model, model_path)
    logger.info(f"模型保存成功，保存路径{os.path.abspath(model_path)}")


if __name__ == '__main__':
    # Wire the pipeline together: load data -> engineer features -> train.
    runner = BrainDrainModel('../data/train.csv')
    engineered, columns = feature_engineering(runner.data_source, runner.logfile)
    model_train_lightgbm(engineered, columns, runner.logfile)
