import os
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_auc_score
from sklearn.preprocessing import StandardScaler
from my_utils.log import Logger
from sklearn.linear_model import LogisticRegression
from sklearn.ensemble import RandomForestClassifier
from xgboost import XGBClassifier
import joblib
from imblearn.over_sampling import SMOTE

# Matplotlib global defaults: SimHei font so Chinese axis labels/titles render
# correctly, plus a larger base font size for readability.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15


# 1. Talent-attrition model wrapper: configures logging and loads the data source.
class BrainDrainModel:
    """Holds a run-specific logger and the raw training data for the attrition model."""

    def __init__(self, path):
        # Timestamp the log file name so every training run writes its own log.
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', 'train_' + stamp).get_logger()
        # Raw data is read once at construction time from the given CSV path.
        self.data_source = pd.read_csv(path)


# 2. Feature engineering
def feature_engineering(data, logger):
    """Build the model feature matrix from the raw attrition data.

    One-hot encodes all categorical columns, then adds derived features
    (age/income buckets, tenure ratios, a risk flag).

    Parameters
    ----------
    data : pd.DataFrame
        Raw data; must contain the numeric columns referenced below.
    logger : logging.Logger
        Unused here; kept for signature consistency with the train functions.

    Returns
    -------
    (pd.DataFrame, list[str])
        The engineered frame and the list of feature column names.
    """
    feature_data = data.copy()
    feature_data = pd.get_dummies(feature_data)

    # Bucketed features. The top bin edge is unbounded (inf): with a finite
    # upper edge, an out-of-range value (e.g. MonthlyIncome > 20000) becomes
    # NaN and .astype(int) raises ValueError.
    feature_data['AgeGroup'] = pd.cut(
        data['Age'], bins=[0, 35, 45, float('inf')], labels=[0, 1, 2]
    ).astype(int)
    feature_data['a'] = pd.cut(
        data['MonthlyIncome'],
        bins=[0, 3500, 7000, 10000, 15000, float('inf')],
        labels=[0, 1, 2, 3, 4],
    ).astype(int)

    # +100 damps the ratio for short careers (avoids divide-by-zero too).
    feature_data["月收入与工作年限比例"] = data["MonthlyIncome"] / (data["TotalWorkingYears"] + 100)
    # Career length relative to tenure at this company (+1 avoids zero division).
    feature_data['aa'] = (feature_data['TotalWorkingYears'] + 1) / (feature_data['YearsAtCompany'] + 1)
    # Promotion speed: time since last promotion as a fraction of tenure.
    feature_data['bb'] = feature_data['YearsSinceLastPromotion'] / (feature_data['YearsAtCompany'] + 1)
    # Time "stuck" in the current role, weighted by tenure.
    feature_data['cc'] = feature_data['YearsInCurrentRole'] * (feature_data['YearsAtCompany'] + 1)
    # Risk flag: highly involved but dissatisfied employees (boolean column).
    feature_data['dd'] = ((feature_data['JobInvolvement'] >= 3) & (feature_data['JobSatisfaction'] <= 2))

    feature_columns = ['Age', 'DistanceFromHome', 'Education',
                       'EnvironmentSatisfaction', 'JobInvolvement', 'JobLevel',
                       'JobSatisfaction', 'MonthlyIncome', 'NumCompaniesWorked',
                       'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction',
                       'StandardHours', 'StockOptionLevel', 'TotalWorkingYears',
                       'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany',
                       'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager',
                       'BusinessTravel_Non-Travel', 'BusinessTravel_Travel_Frequently',
                       'BusinessTravel_Travel_Rarely', 'Department_Human Resources',
                       'Department_Research & Development', 'Department_Sales',
                       'EducationField_Human Resources', 'EducationField_Life Sciences',
                       'EducationField_Marketing', 'EducationField_Medical',
                       'EducationField_Other', 'EducationField_Technical Degree',
                       'Gender_Female', 'Gender_Male', 'JobRole_Healthcare Representative',
                       'JobRole_Human Resources', 'JobRole_Laboratory Technician',
                       'JobRole_Manager', 'JobRole_Manufacturing Director',
                       'JobRole_Research Director', 'JobRole_Research Scientist',
                       'JobRole_Sales Executive', 'JobRole_Sales Representative',
                       'MaritalStatus_Divorced', 'MaritalStatus_Married',
                       'MaritalStatus_Single', 'OverTime_No', 'OverTime_Yes',
                       'AgeGroup', "月收入与工作年限比例", 'aa', 'bb', 'cc', 'dd', 'a']
    return feature_data, feature_columns


# 4.模型训练，评估，保存
# def KNN_model_train(data, features, logger):
#     # 1.数据划分
#     x = data[features]
#     y = data['Attrition']
#     x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23)
#     # 2.网格化搜索与交叉验证
#     # param_grid = {
#     #     'n_estimators': [100, 200, 500],
#     #     'max_depth': [3, 5, 9],
#     #     'learning_rate': [0.1, 0.01, 0.05],
#     #     'random_state': [18, 23, 666]
#     # }
#     # model = XGBRegressor()
#     # grid_search = GridSearchCV(model, param_grid, cv=3)
#     # grid_search.fit(x_train, y_train)
#     # logger.info(f'网格化搜索参数：{grid_search.best_params_}')
#     # 3.模型实例化
#     model = KNeighborsClassifier(n_neighbors=5)
#
#     model.fit(x_train, y_train)
#
#     # 5.模型评估
#     y_score = model.predict_proba(x_test)[:, 1]
#     logger.info(f"knn模型AUC:{roc_auc_score(y_test, y_score)}")
#
#     joblib.dump(model, "../model/knn_20251026.pkl")
#     # logger.info(f"模型保存成功，保存路径{os.path.abspath('../model/Knn_20251025.pkl')}")


def Log_model_train(data, features, logger):
    """Train a cross-validated logistic-regression attrition model, log its
    test AUC, and persist it to ../model/Log_20251026.pkl.

    Parameters
    ----------
    data : pd.DataFrame
        Engineered feature frame; must contain an 'Attrition' target column.
    features : list[str]
        Names of the feature columns to train on.
    logger : logging.Logger
        Destination for the evaluation metric.
    """
    x = data[features]
    y = data['Attrition']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23)

    # Fit the scaler on the training split only, then apply the SAME transform
    # to the test split (the original evaluated on unscaled x_test — a bug).
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Empty grid == plain 5-fold cross-validated LogisticRegression; the dict
    # is kept (renamed from `dict`, which shadowed the builtin) so a search
    # space can be added later without restructuring.
    param_grid = {}
    model = GridSearchCV(LogisticRegression(), param_grid, cv=5)

    # 4. Train
    model.fit(x_train, y_train)

    # 5. Evaluate on the held-out split (previously commented out, leaving
    # the `logger` parameter unused).
    y_score = model.predict_proba(x_test)[:, 1]
    logger.info(f"log模型AUC:{roc_auc_score(y_test, y_score)}")

    # 6. Persist
    joblib.dump(model, "../model/Log_20251026.pkl")


def model_train_xgboost(data, features, logger):
    """Grid-search an XGBoost attrition classifier, log its test AUC, and
    persist the tuned model to ../model/xgboost_20251027.pkl.

    Parameters
    ----------
    data : pd.DataFrame
        Engineered feature frame; must contain an 'Attrition' target column.
    features : list[str]
        Names of the feature columns to train on.
    logger : logging.Logger
        Destination for search results and the evaluation metric.
    """
    # 1. Split, stratified so both splits keep the class ratio.
    x = data[features]
    y = data['Attrition']
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23, stratify=y)
    # Oversample the minority class on the TRAINING split only (no test leakage).
    smote = SMOTE(random_state=42)
    x_train, y_train = smote.fit_resample(x_train, y_train)

    # 2. Grid search with 3-fold cross validation (single-point grid for now).
    param_grid = {
        'n_estimators': [900],
        'max_depth': [5],
        'learning_rate': [0.2],
        'random_state': [2000],
        'subsample': [0.5],
        'colsample_bytree': [0.9],
        'reg_lambda': [40]
    }
    grid_search = GridSearchCV(XGBClassifier(), param_grid, cv=3)
    grid_search.fit(x_train, y_train)
    logger.info(f'网格化搜索参数：{grid_search.best_params_}')

    # 3/4. Use the tuned estimator. (The original re-fit a fresh
    # default-parameter XGBClassifier here, silently discarding the search.)
    model = grid_search.best_estimator_

    # 5. Evaluate on the held-out split.
    y_score = model.predict_proba(x_test)[:, 1]
    logger.info(f"xgboost模型AUC:{roc_auc_score(y_test, y_score)}")

    # 6. Persist
    model_path = "../model/xgboost_20251027.pkl"
    joblib.dump(model, model_path)
    logger.info(f"模型保存成功，保存路径{os.path.abspath(model_path)}")


# def Rand_model_train(data, features, logger):
#     x = data[features]
#     y = data['Attrition']
#     x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=23)
#
#     bz = StandardScaler()
#     x_train = bz.fit_transform(x_train)
#
#     model = RandomForestClassifier()
#     dict = {
#         'n_estimators':[100,200,300],
#         'max_features':["sqrt", 0.2, 0.3, 0.5, 0.7],
#         'max_depth':[10,12,15],
#         'bootstrap':[True,]
#     }
#     model = GridSearchCV(model, dict, cv=5)
#
#     # 4.模型训练
#     model.fit(x_train, y_train)
#
#     # 5.模型评估
#     y_score = model.predict_proba(x_test)[:, 1]
#     print(model.best_params_)
#     logger.info(f"rand模型AUC:{roc_auc_score(y_test, y_score)}")
#
#     # 6.模型保存
#     joblib.dump(model, "../model/Rand_20251026.pkl")


if __name__ == '__main__':
    # Script entry point: load raw data, build features, train the XGBoost model.
    wrapper = BrainDrainModel('../data/train.csv')
    engineered, columns = feature_engineering(wrapper.data_source, wrapper.logfile)
    model_train_xgboost(engineered, columns, wrapper.logfile)
