import os
import pandas as pd
import numpy as np
import datetime
import matplotlib.pyplot as plt
from haojiaoyang.utils.log import Logger
from haojiaoyang.utils.common import load_data
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
import joblib                                                  # 保存/加载模型
from sklearn.preprocessing import StandardScaler,MinMaxScaler  # 特征工程 数据标准化/归一化
from sklearn.neighbors import KNeighborsClassifier             # KNN算法 分类对象
from sklearn.ensemble import (RandomForestClassifier,          # 随机森林
                             GradientBoostingClassifier,       # 梯度提升树
                             AdaBoostClassifier,
                             AdaBoostRegressor,)
from sklearn.linear_model import LogisticRegression
from sklearn.preprocessing import LabelEncoder                 # 将标签进行转换为数字编码
import xgboost as xgb
from sklearn.metrics import precision_score,recall_score,f1_score,classification_report,roc_auc_score



# 1. Define the talent-attrition (employee churn) model class
# 1-1 Goal 1: configure logging
# 1-2 Goal 2: load the data source
class TalentlossModel:
    """Talent-attrition model wrapper.

    On construction it sets up a timestamped run logger and loads the raw
    data source for later analysis / feature engineering / training.
    """

    def __init__(self, path):
        """Initialise the run.

        Args:
            path: location of the training data, forwarded to load_data().
        """
        # Timestamped log file name, e.g. train_20250604153000.
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger("../", 'train_' + stamp).get_logger()
        # Smoke-test the logger immediately.
        self.logfile.info("开始创建人才流失模型的对象类啦！！！")
        # Raw data source kept on the instance for downstream steps.
        self.data_source = load_data(path)


def ana_data(data):
    """Plot the attrition distribution of the data set.

    Produces three stacked bar charts — overall attrition head-count,
    attrition rate per department and attrition rate per job role — then
    saves the figure to disk and displays it.

    Args:
        data: DataFrame with 'Attrition', 'Department' and 'JobRole'
            columns. 'Attrition' is assumed to be 0/1 so that mean() is an
            attrition rate — TODO confirm against the data source.
    """
    # Work on a copy so the caller's frame is never modified.
    ana_data = data.copy()

    # One tall figure; a 4-row grid with the last row left empty.
    fig = plt.figure(figsize=(20, 60))

    # 1. Overall attrition head-count.
    counts = ana_data['Attrition'].value_counts()
    ax1 = fig.add_subplot(411)
    ax1.bar(counts.index, counts.values)
    ax1.set_title('员工离职情况分布')
    ax1.set_xlabel('是否离职')
    ax1.set_ylabel('人数')
    ax1.set_xticks([0, 1])
    ax1.set_xticklabels(['在职', '离职'])

    # 2. Attrition rate per department.
    # Fix: group the local copy (not the caller's `data`), consistent with
    # the copy made at the top of the function.
    department_attrition = ana_data.groupby('Department')['Attrition'].mean().reset_index()
    ax2 = fig.add_subplot(412)
    ax2.bar(department_attrition['Department'], department_attrition['Attrition'])
    ax2.set_title('不同部门的离职率')
    ax2.set_xlabel('部门')
    ax2.set_ylabel('离职率')
    # NOTE(review): hard-coded tick labels assume exactly these three
    # departments in this order — verify against the data.
    ax2.set_xticks([0, 1, 2])
    ax2.set_xticklabels(['人力资源', '研究开发', '销售'])

    # 3. Attrition rate per job role.
    jobrole_attrition = ana_data.groupby('JobRole')['Attrition'].mean().reset_index()
    ax3 = fig.add_subplot(413)
    ax3.bar(jobrole_attrition['JobRole'], jobrole_attrition['Attrition'])
    ax3.set_title('工作角色的离职率')
    ax3.set_xlabel('工作角色')
    ax3.set_ylabel('离职率')
    ax3.tick_params(axis='x', rotation=45)

    # NOTE(review): file name mentions “负荷” (power load) — looks copied
    # from another project; kept unchanged to avoid breaking consumers.
    plt.savefig("../data/负荷整体分布情况图.png")
    plt.show()
    # Fix: release the figure so repeated calls don't leak open figures.
    plt.close(fig)


def feature_engineering(data, logger):
    """Encode every object-dtype column of *data* to integer labels.

    Each string column is mapped to integer codes in sorted (alphabetical)
    class order — the same codes a per-column sklearn LabelEncoder would
    produce — without mutating the caller's frame.

    Args:
        data: input DataFrame; a copy is transformed, the original is untouched.
        logger: run logger; records which columns were encoded.

    Returns:
        tuple: (encoded_frame, object_columns) where object_columns is the
        Index of column names that were label-encoded.
    """
    # Never mutate the caller's frame.
    feature_data = data.copy()

    # Columns that still hold raw strings and need numeric encoding.
    object_columns = feature_data.select_dtypes(include=['object']).columns

    for col in object_columns:
        # sort=True reproduces LabelEncoder's alphabetical class ordering,
        # and a fresh factorization per column avoids reusing a single
        # stateful encoder across unrelated columns.
        codes, _ = pd.factorize(feature_data[col], sort=True)
        feature_data[col] = codes

    # Fix: the logger parameter was previously accepted but never used.
    logger.info("Label-encoded %d object column(s): %s",
                len(object_columns), list(object_columns))

    return feature_data, object_columns

def train_model(data, features, logger):
    """Train, evaluate and persist four attrition classifiers.

    Runs a grid search to report the best XGBoost hyper-parameters, then
    fits XGBoost, KNN, AdaBoost and logistic-regression classifiers,
    prints each model's test-set ROC-AUC and saves each model to ../model/.

    Args:
        data: label-encoded DataFrame (output of feature_engineering) that
            contains all X_columns plus the 'Attrition' target.
        features: encoded object-column names; currently unused, kept for
            interface compatibility with the caller.
        logger: run logger.
    """
    # 1. Feature / target selection.
    X_columns = ['OverTime', 'YearsAtCompany', 'YearsWithCurrManager', 'JobLevel',
                 'StockOptionLevel', 'JobSatisfaction', 'EnvironmentSatisfaction',
                 'WorkLifeBalance', 'JobRole', 'Department', 'MonthlyIncome', 'Age']
    # BUG FIX: previously read the module-level `feature_data` instead of the
    # `data` parameter, raising NameError for any other caller.
    x = data[X_columns]
    y = data['Attrition']

    # Standardise features; encode the target to 0..n_classes-1.
    scaler = StandardScaler()
    x = scaler.fit_transform(x)
    le = LabelEncoder()
    y = le.fit_transform(y)

    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.3, random_state=25)

    # 2. Hyper-parameter grid search for XGBoost.
    param_dict = {
        'n_estimators': [50, 100, 150, 200],
        'max_depth': [3, 5, 6, 7],
        'learning_rate': [0.01, 0.1],
    }
    # BUG FIX: the target is categorical, so search over a classifier, not
    # XGBRegressor (whose default R^2 scoring is meaningless here).
    gs_es = GridSearchCV(estimator=xgb.XGBClassifier(random_state=25),
                         param_grid=param_dict, cv=5)
    gs_es.fit(x_train, y_train)
    logger.info(f"最优参数组合:{gs_es.best_params_}")

    def _fit_eval_save(tag, model, path):
        """Fit `model`, print its test-set ROC-AUC and persist it to `path`."""
        model.fit(x_train, y_train)
        y_proba = model.predict_proba(x_test)[:, 1]
        print(f"{tag}_ROC曲线{roc_auc_score(y_test, y_proba)}")
        joblib.dump(model, path)
        # BUG FIX: the original nested double quotes inside a double-quoted
        # f-string — a SyntaxError before Python 3.12.
        logger.info(f"模型保存成功，保存路径{os.path.abspath(path)}")

    # BUG FIX: objective='multi:softmax' returns class labels, not
    # probabilities, so ROC-AUC on predict_proba was unsound; let
    # XGBClassifier pick the probabilistic binary objective.
    _fit_eval_save("xgb",
                   xgb.XGBClassifier(n_estimators=50, learning_rate=0.1,
                                     max_depth=3, random_state=25),
                   "../model/xgb_20250604.pkl")
    _fit_eval_save("KNN", KNeighborsClassifier(n_neighbors=5), "../model/KNN_20250607.pkl")
    _fit_eval_save("ADB", AdaBoostClassifier(), "../model/Adb_20250607.pkl")
    _fit_eval_save("LOG", LogisticRegression(random_state=25), "../model/LOR_20250607.pkl")



if __name__ == '__main__':
    # Build the model object (configures logging and loads the CSV source).
    pm = TalentlossModel("../data/train.csv")

    # Optional exploratory step:
    # ana_data(pm.data_source)

    # Encode string columns, then train and persist the candidate models.
    encoded_frame, encoded_columns = feature_engineering(pm.data_source, pm.logfile)
    train_model(encoded_frame, encoded_columns, pm.logfile)