import os
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from haojiaoyang.utils.log import Logger
from haojiaoyang.utils.common import load_data
from wangmingyang.src.trainXXX import pre_data
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
import joblib  # 保存/加载模型
from sklearn.preprocessing import StandardScaler, MinMaxScaler  # 特征工程 数据标准化/归一化
from sklearn.neighbors import KNeighborsClassifier  # KNN算法 分类对象
from sklearn.ensemble import (RandomForestClassifier,  # 随机森林
                              GradientBoostingClassifier,  # 梯度提升树
                              AdaBoostClassifier,
                              AdaBoostRegressor, )
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder  # 将标签进行转换为数字编码
import xgboost as xgb
from sklearn.metrics import precision_score, recall_score, f1_score, classification_report, roc_auc_score
from lightgbm.sklearn import LGBMClassifier
def pre_data(file_path):
    """Load the HR attrition CSV and build the model inputs.

    Drops identifier-like / constant columns, integer-encodes the two
    ordinal categoricals, engineers several composite features, and
    one-hot encodes whatever object columns remain.

    :param file_path: path to a CSV with the IBM HR attrition schema
    :return: tuple ``(x, y)`` — feature DataFrame and the 'Attrition' labels
    """
    frame = pd.read_csv(file_path)

    # Columns considered uninformative for the model are removed up front.
    unused_cols = ['Department', 'EmployeeNumber', 'Gender', 'Over18',
                   'StandardHours', 'EducationField', 'JobRole']
    frame = frame.drop(columns=unused_cols)

    # Ordinal-ish categoricals -> integer codes; unmapped values become -1.
    marital_codes = {'Divorced': 0, 'Single': 1, 'Married': 2}
    travel_codes = {'Travel_Rarely': 0, 'Travel_Frequently': 1, 'Non-Travel': 2}
    frame['MaritalStatus'] = frame['MaritalStatus'].map(marital_codes).fillna(-1).astype(int)
    frame['BusinessTravel'] = frame['BusinessTravel'].map(travel_codes).fillna(-1).astype(int)

    # Weighted blend of the five satisfaction-style survey scores.
    frame['JobSatisfactionScore'] = (
        frame['JobSatisfaction'] * 0.3
        + frame['EnvironmentSatisfaction'] * 0.2
        + frame['RelationshipSatisfaction'] * 0.2
        + frame['WorkLifeBalance'] * 0.15
        + frame['JobInvolvement'] * 0.15
    )

    # Stability proxy: satisfaction per company worked at (+1 avoids /0).
    frame['EmployeeStability'] = frame['JobSatisfaction'] / (frame['NumCompaniesWorked'] + 1)

    # Work pressure = overtime flag x (5 - work/life balance).
    frame['OverTime_num'] = frame['OverTime'].map({'Yes': 1, 'No': 0})
    frame['WorkPressure'] = frame['OverTime_num'] * (5 - frame['WorkLifeBalance'])

    # Mean of the two environment/relationship satisfaction scores.
    frame['ResourceSatisfaction'] = (frame['EnvironmentSatisfaction'] + frame['RelationshipSatisfaction']) / 2

    # Career stagnation: years since promotion per year at the company
    # (epsilon keeps brand-new hires from dividing by zero).
    frame['CareerStagnation'] = frame['YearsSinceLastPromotion'] / (frame['YearsAtCompany'] + 0.001)

    # Split the label off, then one-hot encode the remaining object columns;
    # 'OverTime_No' is dropped as the complement of 'OverTime_Yes'.
    y = frame['Attrition']
    x = frame.drop('Attrition', axis=1)
    x = pd.get_dummies(x).drop('OverTime_No', axis=1)

    return x, y

def train_model(x, y):
    """Train five classifiers on a hold-out split and print their AUCs.

    :param x: feature DataFrame (numeric, already one-hot encoded)
    :param y: label Series ('Yes'/'No' attrition flags)
    :return: tuple ``(es, es2, es3, es4, es5, scaler)`` — the five fitted
             models (XGB grid search, KNN, AdaBoost, LogisticRegression
             grid search, LightGBM) and the fitted StandardScaler.
    """
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=25)

    # Fit the scaler on the training split only and apply the SAME transform
    # to the hold-out split. (Bug fix: the original scored every model on the
    # raw, unscaled x_test while the models were trained on scaled data.)
    scaler = StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)

    # Encode labels consistently on both splits. (Bug fix: the original
    # encoded y_train only and compared string labels against encoded ones.)
    le = LabelEncoder()
    y_train = le.fit_transform(y_train)
    y_test = le.transform(y_test)

    # 1. XGBoost with a grid search over the main regularizing knobs.
    # Bug fix: this is a binary classification task, so use XGBClassifier
    # with a binary objective instead of XGBRegressor + 'multi:softmax',
    # and score the AUC from predict_proba.
    param_dict = {
        'n_estimators': [50, 100, 150, 200],
        'max_depth': [3, 5, 6, 7],
        'learning_rate': [0.01, 0.1]
    }
    es = xgb.XGBClassifier(objective='binary:logistic', eval_metric='logloss',
                           random_state=25)
    # 2-1: wrap in a grid search (5-fold CV)
    es = GridSearchCV(estimator=es, param_grid=param_dict, cv=5)
    # 2-2: fit on the training split
    es.fit(x_train, y_train)
    y_pred = es.predict_proba(x_test)[:, 1]
    # 2-3: report the best hyper-parameter combination
    print(f"最优参数组合:{es.best_params_}")
    print(f"XGB AUC值:{roc_auc_score(y_test, y_pred)}")

    # 2. KNN baseline.
    es2 = KNeighborsClassifier(n_neighbors=5)
    es2.fit(x_train, y_train)
    y_pre2 = es2.predict_proba(x_test)[:, 1]
    print(f"KNN_ROC曲线{roc_auc_score(y_test, y_pre2)}")

    # 3. AdaBoost.
    es3 = AdaBoostClassifier(n_estimators=150, learning_rate=0.1, random_state=25)
    es3.fit(x_train, y_train)
    y_pre3 = es3.predict_proba(x_test)[:, 1]
    print(f"ADB_ROC曲线{roc_auc_score(y_test, y_pre3)}")

    # 4. Logistic regression with a grid search over C / penalty.
    param_grid_log = {
        'C': [0.01, 0.5, 0.9, 1, 5, 15, 25, 50],  # inverse regularization strength
        'penalty': ['l1', 'l2'],  # regularization type
        'solver': ['liblinear']   # the only bundled solver supporting both penalties
    }
    es4 = LogisticRegression(C=5, penalty='l1', solver='liblinear', random_state=25)
    es4 = GridSearchCV(estimator=es4, param_grid=param_grid_log, cv=4)
    es4.fit(x_train, y_train)
    print(f"LOG_ROC曲线{roc_auc_score(y_test, es4.predict_proba(x_test)[:, 1])}")
    print(f"最优参数组合:{es4.best_params_}")

    # 5. LightGBM with shallow trees.
    es5 = LGBMClassifier(max_depth=2, objective='binary')
    es5.fit(x_train, y_train)
    y_pre5 = es5.predict_proba(x_test)[:, 1]
    print(f"LGB_ROC曲线{roc_auc_score(y_test, y_pre5)}")

    return es, es2, es3, es4, es5, scaler


if __name__ == '__main__':
    # Build the training features/labels and the external evaluation set.
    x, y = pre_data('../data/train.csv')
    x_test, y_test = pre_data('../data/test2.csv')

    # NOTE(review): pd.get_dummies runs per-file inside pre_data, so x and
    # x_test only share the same columns if both CSVs contain the same
    # category values — confirm, or reindex x_test to x.columns.

    # Train all five models; also returns the scaler fitted on the train split.
    es, es2, es3, es4, es5, scaler = train_model(x, y)

    # Bug fix: apply the already-fitted scaler with transform(). The original
    # called fit_transform() here, silently refitting the scaler on the test
    # set and invalidating the train-time standardization.
    x_test = scaler.transform(x_test)

    y_pred1 = es.predict(x_test)
    y_pred2 = es2.predict_proba(x_test)[:, 1]
    y_pred3 = es3.predict_proba(x_test)[:, 1]
    y_pred4 = es4.predict_proba(x_test)[:, 1]
    y_pred5 = es5.predict_proba(x_test)[:, 1]
    print(f"XGB AUC:{roc_auc_score(y_test, y_pred1)}")
    print(f"KNN AUC:{roc_auc_score(y_test, y_pred2)}")
    print(f"ADB AUC:{roc_auc_score(y_test, y_pred3)}")
    print(f"LOR AUC:{roc_auc_score(y_test, y_pred4)}")
    print(f"LGB AUC:{roc_auc_score(y_test, y_pred5)}")


