import os
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import datetime
from Mr_Zhong.utils.log import Logger
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import mean_squared_error, mean_absolute_error
from sklearn.metrics import roc_auc_score, classification_report
from sklearn.utils.class_weight import compute_sample_weight
import seaborn as sns
import joblib

# Use the SimHei font so Chinese axis labels/titles render correctly, and
# keep the minus sign displayable once a non-default CJK font is active.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

class PowerLoadModel(object):
    """Holds the raw training data and a run-scoped logger.

    NOTE(review): the class name says "power load" but the feature columns
    used downstream are HR/attrition fields — confirm the intended domain.
    """

    def __init__(self, filename):
        # One timestamped log file per training run.
        stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        self.logfile = Logger('../', "train_" + stamp).get_logger()
        # Load the raw dataset once; downstream steps copy before mutating.
        self.data_source = pd.read_csv(filename, encoding='utf-8')


def ana_data(data):
    """Print a quick overview of *data* and save two diagnostic bar charts.

    Saves missing-value counts and per-column unique-value counts as PNGs
    under ../result/ (directory must already exist), then shows the figures.

    Parameters
    ----------
    data : pandas.DataFrame
        Raw dataset to profile; not modified (a deep copy is taken).
    """
    data = data.copy(deep=True)

    # 1. Overall summary.
    # BUG FIX: `data.info` was missing the call parentheses, which printed
    # the bound-method repr instead of the actual column/dtype summary.
    print(f'数据整体情况:\n{data.info()}')
    print(f'数据前五列:\n{data.head()}')
    print(f'数据列表名字:\n{data.columns}')

    # 2. Missing values per column (the original labels call them
    # "异常点"/outliers, but isnull() counts NaNs — kept for compatibility).
    null_counts = data.isnull().sum()
    plt.figure(figsize=(10, 6))
    plt.bar(null_counts.index, null_counts.values)
    plt.xticks(rotation=90)  # rotate so x labels do not overlap
    plt.ylabel('异常点数量')
    plt.title('数据集中各列的异常点数量')
    plt.tight_layout()
    plt.savefig('../result/各个特征的异常点数量.png')

    # 3. Number of distinct values per feature, with value labels on bars.
    unique_counts = data.nunique()
    plt.figure(figsize=(12, 6))
    bars = plt.bar(unique_counts.index, unique_counts.values, color='skyblue')
    for bar in bars:
        yval = bar.get_height()
        plt.text(bar.get_x() + bar.get_width() / 2.0, yval + 0.5, int(yval),
                 ha='center', va='bottom', fontsize=10)
    plt.xticks(rotation=90)
    plt.ylabel('唯一值数量')
    plt.title('数据集中各特征的类别数量', fontsize=14)
    # Drop the top/right spines for a cleaner look.
    ax = plt.gca()
    ax.spines['top'].set_visible(False)
    ax.spines['right'].set_visible(False)
    plt.tight_layout()
    plt.savefig('../result/各个特征的种类数量.png')

    # 4. Display both figures.
    plt.show()

def feature_engineering(data, logger):
    """Select feature columns and encode categorical fields as integers.

    Parameters
    ----------
    data : pandas.DataFrame
        Raw dataset; column 0 is taken as the target label, and the
        dataframe must contain all feature columns listed below.
    logger : logging.Logger-like
        Receives progress messages via ``.info``.

    Returns
    -------
    tuple[pandas.DataFrame, pandas.Series]
        ``(X_train, Y_train)`` — the encoded feature matrix and the
        target taken from the first column of *data*.
    """
    logger.info('===============开始进行特征工程处理===============')
    result = data.copy(deep=True)
    logger.info("===============开始处理数据特征===================")

    # 1. Feature selection. (Original note said PerformanceRating, JobRole
    # and WorkLifeBalance were dropped, but they are in fact included.)
    feature_cols = ['Age', 'BusinessTravel', 'Department', 'DistanceFromHome', 'Education',
                    'EducationField', 'EnvironmentSatisfaction', 'Gender', 'JobInvolvement', 'JobLevel',
                    'JobRole', 'JobSatisfaction', 'MaritalStatus', 'MonthlyIncome', 'NumCompaniesWorked',
                    'OverTime', 'PercentSalaryHike', 'PerformanceRating', 'RelationshipSatisfaction',
                    'StockOptionLevel', 'TotalWorkingYears', 'TrainingTimesLastYear',
                    'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion',
                    'YearsWithCurrManager']
    # .copy() guarantees an independent frame so the column assignments
    # below never trigger a SettingWithCopyWarning.
    X_train = result.loc[:, feature_cols].copy()
    Y_train = result.iloc[:, 0]

    # 2. Integer encodings for each categorical column (same codes as the
    # original hand-written blocks, consolidated into one table).
    category_maps = {
        'BusinessTravel': {'Non-Travel': 0, 'Travel_Rarely': 1, 'Travel_Frequently': 2},
        'Department': {'Human Resources': 1, 'Research & Development': 2, 'Sales': 3},
        'EducationField': {'Life Sciences': 1, 'Medical': 2, 'Marketing': 3,
                           'Technical Degree': 4, 'Other': 5, 'Human Resources': 6},
        'Gender': {'Male': 1, 'Female': 0},
        'JobRole': {'Sales Executive': 1, 'Research Scientist': 2, 'Laboratory Technician': 3,
                    'Manufacturing Director': 4, 'Healthcare Representative': 5, 'Manager': 6,
                    'Sales Representative': 7, 'Research Director': 8, 'Human Resources': 9},
        'MaritalStatus': {'Divorced': 0, 'Single': 1, 'Married': 2},
        'OverTime': {'No': 0, 'Yes': 1},
    }
    for col, mapping in category_maps.items():
        # Unmapped values (if any) become NaN, same as the original .map calls.
        X_train[col] = X_train[col].map(mapping)

    # X_train.to_csv('X_train.csv', index=False)
    logger.info("===============数据特征化结束===================")
    return X_train, Y_train


def model_CV_train(X_train, Y_train, logger, param_dict):
    """Grid-search RandomForest hyperparameters with 5-fold CV (roc_auc).

    Parameters
    ----------
    X_train, Y_train : DataFrame / Series
        Full feature matrix and target; 20% is held out (stratified).
    logger : logging.Logger-like
        Progress/result messages go through ``.info``.
    param_dict : dict
        Grid passed to GridSearchCV (e.g. n_estimators, max_depth lists).

    Returns
    -------
    tuple[dict, float]
        ``(best_params_, best_score_)`` — previously computed but
        discarded; returned so callers can feed them into model_train.
    """
    logger.info("=========开始交叉网格处理===================")
    # 1. Stratified 80/20 split; fixed seed matches model_train's split.
    x_train, x_test, y_train, y_test = train_test_split(
        X_train, Y_train, test_size=0.2, random_state=6, stratify=Y_train)
    # Balanced per-sample weights compensate for class imbalance.
    cls_weight = compute_sample_weight('balanced', y_train)

    # 2. Grid search with 5-fold cross-validation, scored by ROC-AUC.
    print("开始网格化搜索")
    print(datetime.datetime.now())
    grid_cv = GridSearchCV(estimator=RandomForestClassifier(),
                           param_grid=param_dict,
                           cv=5,
                           scoring="roc_auc",
                           verbose=1)
    grid_cv.fit(x_train, y_train, sample_weight=cls_weight)

    # 3. Report the best combination found.
    print(grid_cv.best_params_)
    print("最佳评分:", grid_cv.best_score_)
    print("结束网格化搜索")
    print(datetime.datetime.now())
    logger.info(f"最优超参数组合: {grid_cv.best_params_}, 最佳评分: {grid_cv.best_score_}")
    logger.info("=========交叉网格处理结束===================")
    return grid_cv.best_params_, grid_cv.best_score_

def model_train(X_train, Y_train, logger, n_estimators, max_depth):
    """Train a RandomForest classifier, evaluate it, and persist artifacts.

    Splits the data 80/20 (stratified), trains with balanced sample
    weights, prints/logs regression- and classification-style metrics,
    saves the model to ../model/xgb.pkl (legacy filename) and a feature
    importance chart to ../result/.

    Parameters
    ----------
    X_train, Y_train : DataFrame / Series
        Encoded feature matrix and binary target.
    logger : logging.Logger-like
        Receives the evaluation summary via ``.info``.
    n_estimators, max_depth : int
        RandomForest hyperparameters.

    Returns
    -------
    RandomForestClassifier
        The fitted model (previously not returned).
    """
    logger.info("=========开始训练模型===================")
    # 1. Stratified 80/20 split with a fixed seed for reproducibility.
    x_train, x_test, y_train, y_test = train_test_split(
        X_train, Y_train, test_size=0.2, random_state=6, stratify=Y_train)
    cls_weight = compute_sample_weight('balanced', y_train)

    # 2. Model training.
    # 2.1 Create the RandomForest model (earlier comments said "XGBoost",
    # but the estimator is and was a RandomForestClassifier).
    rf = RandomForestClassifier(
        n_estimators=n_estimators,  # number of trees
        max_depth=max_depth,        # maximum tree depth
        random_state=6              # random seed
    )

    # 2.2 Fit with balanced sample weights to offset class imbalance.
    rf.fit(x_train, y_train, sample_weight=cls_weight)

    # 3. Evaluation.
    # 3.1 Hard predictions on the training set.
    y_pred_train = rf.predict(x_train)
    # 3.2 Positive-class probabilities (for AUC/MSE) and hard labels on test.
    y_pred_test = rf.predict_proba(x_test)[:, 1]
    y_pred_test1 = rf.predict(x_test)

    # 3.3 Train-set MSE / MAE.
    mse_train = mean_squared_error(y_true=y_train, y_pred=y_pred_train)
    mae_train = mean_absolute_error(y_true=y_train, y_pred=y_pred_train)
    print(f"模型在训练集上的均方误差：{mse_train}")
    print(f"模型在训练集上的平均绝对误差：{mae_train}")
    # 3.4 Test-set MSE / MAE (computed on probabilities, as before).
    mse_test = mean_squared_error(y_true=y_test, y_pred=y_pred_test)
    mae_test = mean_absolute_error(y_true=y_test, y_pred=y_pred_test)
    print(f"模型在测试集上的均方误差：{mse_test}")
    print(f"模型在测试集上的平均绝对误差：{mae_test}")
    print(f"模型在测试集上的auc:{roc_auc_score(y_test, y_pred_test)}")
    # 3.5 Classification metrics; precision/recall/f1 use pos_label=0
    # (the metrics target the negative class — presumably "stays"/majority).
    print(f"准确率(accuracy):{accuracy_score(y_test, y_pred_test1)}")
    print(f"精确率(precision):{precision_score(y_test, y_pred_test1, pos_label=0)}")
    print(f"召回率(recall):{recall_score(y_test, y_pred_test1, pos_label=0)}")
    print(f"f1分数:{f1_score(y_test, y_pred_test1, pos_label=0)}")
    print(f"分类评估报告:{classification_report(y_test, y_pred_test1)}")
    logger.info("=========================模型训练完成=============================")
    logger.info(f"模型在训练集上的均方误差：{mse_train}")
    logger.info(f"模型在训练集上的平均绝对误差：{mae_train}")
    logger.info(f"模型在测试集上的均方误差：{mse_test}")
    logger.info(f"模型在测试集上的平均绝对误差：{mae_test}")
    # BUG FIX: this line logged roc_auc_score while labelled "accuracy";
    # it now logs the actual accuracy, matching the printed value above.
    logger.info(f"模型对新数据进行预测的准确率(accuracy):{accuracy_score(y_test, y_pred_test1)}")
    logger.info(f"模型对新数据进行预测的精确率(precision):{precision_score(y_test, y_pred_test1, pos_label=0)}")
    logger.info(f"模型对新数据进行预测的召回率(recall):{recall_score(y_test, y_pred_test1, pos_label=0)}")
    logger.info(f"模型对新数据进行预测的f1分数:{f1_score(y_test, y_pred_test1, pos_label=0)}")
    logger.info(f"模型在测试集上的auc:{roc_auc_score(y_test, y_pred_test)}")

    # 4. Persist the model. Path kept as-is ("xgb.pkl") so existing
    # loaders keep working, despite the model being a RandomForest.
    joblib.dump(rf, '../model/xgb.pkl')

    # 5. Feature importances.
    importances = rf.feature_importances_
    feature_importance_df = pd.DataFrame({
        'Feature': x_train.columns,
        'Importance': importances
    }).sort_values(by='Importance', ascending=False)

    sns.set(style="whitegrid")
    plt.figure(figsize=(12, 8))
    sns.barplot(x='Importance', y='Feature', data=feature_importance_df, palette="viridis")
    # Title corrected: the chart previously claimed "XGBoost".
    plt.title('RandomForest 模型特征重要性', fontsize=14)
    plt.tight_layout()
    plt.savefig('../result/模型的特征权重.png')
    # plt.show()

    return rf


if __name__ == '__main__':
    # 0.1 Hyperparameter grid for the (optional) cross-validated search.
    param_dict = {
        'n_estimators': list(range(10, 500, 10)),
        'max_depth': list(range(2, 13)),
    }
    # 0.2 Hyperparameters used for the final training run.
    n_estimators = 350
    max_depth = 12

    # 1. Load the dataset.
    input_file = os.path.join('../data', 'train.csv')
    model = PowerLoadModel(input_file)
    # # 2. Exploratory analysis (disabled).
    # ana_data(model.data_source)
    # 3. Feature engineering.
    X_train, Y_train = feature_engineering(model.data_source, model.logfile)

    # 4. Grid search for the best hyperparameters (disabled).
    # model_CV_train(X_train, Y_train, model.logfile, param_dict)

    # 5. Train, evaluate, and persist the model.
    model_train(X_train, Y_train, model.logfile, n_estimators, max_depth)
