from utils.log import Logger
import pandas as pd
import joblib
from imblearn.over_sampling import SMOTE
from matplotlib import pyplot as plt
import numpy as np
from sklearn.preprocessing import OneHotEncoder     # 对非数字类型的数据进行热编码处理
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
import lightgbm as lgb      # 使用lightgbm模型
from sklearn.model_selection import GridSearchCV
from sklearn.metrics import precision_score, accuracy_score, roc_auc_score, recall_score, f1_score, roc_curve  # 模型评估指标
import os
# Cap loky (joblib backend) worker count to avoid oversubscription warnings.
os.environ['LOKY_MAX_CPU_COUNT'] = '4'

# Configure matplotlib to render Chinese labels (SimHei) and proper minus signs.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

from scipy.stats import chi2_contingency

logger_obj = Logger("../", log_name="train")  # instantiate the project logger (log files under ../)
logger = logger_obj.get_logger()    # obtain the actual logging handle used below


# 1. Load the raw data file and return it as a DataFrame
def init(path="../data/train.csv", encoding="UTF-8"):
    """Read the training CSV into a pandas DataFrame.

    Args:
        path: CSV file to load; defaults to the project's train split,
            so existing callers (``init()``) are unaffected.
        encoding: text encoding passed through to ``pd.read_csv``.

    Returns:
        pd.DataFrame with the raw training data.
    """
    df = pd.read_csv(path, encoding=encoding)
    return df


# 2. Exploratory data analysis
def eda(df):
    """Explore how Over18, StandardHours and EmployeeNumber relate to Attrition.

    Renders a 2x2 matplotlib figure (crosstab bar chart annotated with a
    chi-square p-value, two boxplots, and a correlation heatmap) and then
    prints summary statistics for the same three features.

    Side effects only: blocks on ``plt.show()`` and writes to stdout.
    Returns None.
    """
# Compute each feature's relationship with the target variable and visualize it
    target = 'Attrition'
    
    # Lay out a 2x2 grid of subplots
    fig, axes = plt.subplots(2, 2, figsize=(15, 10))
    fig.suptitle('特征与目标变量关系分析', fontsize=16)
    
    # 1. Over18 vs Attrition (categorical vs categorical -> crosstab bar chart)
    over18_attrition = pd.crosstab(df['Over18'], df[target])
    over18_attrition.plot(kind='bar', ax=axes[0,0], title='Over18 vs Attrition')
    axes[0,0].set_ylabel('Count')
    axes[0,0].tick_params(axis='x', rotation=0)
    
    # Chi-square test of independence; annotate the p-value on the subplot
    chi2, p_value, _, _ = chi2_contingency(over18_attrition)
    axes[0,0].text(0.05, 0.95, f'p-value: {p_value:.4f}', transform=axes[0,0].transAxes, 
                   verticalalignment='top', bbox=dict(boxstyle='round', facecolor='white', alpha=0.8))
    
    # 2. StandardHours vs Attrition (numeric vs categorical -> boxplot per class)
    df.boxplot(column='StandardHours', by=target, ax=axes[0,1])
    axes[0,1].set_title('StandardHours vs Attrition')
    axes[0,1].set_xlabel(target)
    
    # 3. EmployeeNumber vs Attrition
    df.boxplot(column='EmployeeNumber', by=target, ax=axes[1,0])
    axes[1,0].set_title('EmployeeNumber vs Attrition')
    axes[1,0].set_xlabel(target)
    
    # 4. Pairwise correlation heatmap of the three candidate drop columns
    subset_df = df[['Over18', 'StandardHours', 'EmployeeNumber']].copy()
    
    # Integer-encode Over18 if it is still a string column, so corr() can use it
    if subset_df['Over18'].dtype == 'object':
        subset_df['Over18'] = pd.Categorical(subset_df['Over18']).codes
    
    corr_matrix = subset_df.corr()
    im = axes[1,1].imshow(corr_matrix, cmap='coolwarm', aspect='auto')
    axes[1,1].set_title('特征间相关性')
    axes[1,1].set_xticks(range(len(corr_matrix.columns)))
    axes[1,1].set_yticks(range(len(corr_matrix.columns)))
    axes[1,1].set_xticklabels(corr_matrix.columns, rotation=45)
    axes[1,1].set_yticklabels(corr_matrix.columns)
    
    # Overlay the correlation coefficient in each heatmap cell
    for i in range(len(corr_matrix.columns)):
        for j in range(len(corr_matrix.columns)):
            axes[1,1].text(j, i, f'{corr_matrix.iloc[i, j]:.2f}', 
                          ha='center', va='center', color='black')
    
    plt.tight_layout()
    plt.show()
    
    # Print textual summaries of the same relationships
    print("特征与目标变量关系统计:")
    print("-" * 50)
    
    # Over18 value distribution and its crosstab against the target
    print("\nOver18 分布:")
    print(df['Over18'].value_counts())
    print("\nOver18 与 Attrition 交叉表:")
    print(over18_attrition)
    
    # StandardHours per-class descriptive statistics
    print("\nStandardHours 统计信息:")
    print(df.groupby(target)['StandardHours'].describe())
    
    # EmployeeNumber per-class descriptive statistics
    print("\nEmployeeNumber 统计信息:")
    print(df.groupby(target)['EmployeeNumber'].describe())


# 3. Feature extraction
def feature_engineering(df):
    """Feature-extraction placeholder.

    TODO: not yet implemented — currently a no-op that returns None.
    The column dropping / one-hot encoding it would own is done inline
    in model_train for now.
    """
    pass


# 4. Model training
def model_train(df):
    """Train a LightGBM classifier on the attrition data and evaluate it.

    Pipeline: drop low-information columns, one-hot encode categoricals,
    train/test split, standardize features, fit LGBMClassifier with a
    class-imbalance weight, print evaluation metrics, persist the model
    and scaler, and plot the ROC curve.

    Args:
        df: raw DataFrame containing an 'Attrition' target column.

    Side effects: writes ``../model/model_LGBM.pkl`` and
    ``../model/transformer.pkl``, prints metrics, shows a plot.
    Returns None.
    """
    logger.info("开始进行模型训练")

    # 4.1 Drop columns with no predictive value plus the target itself
    # (Over18/StandardHours are constant per the EDA; EmployeeNumber is an ID).
    df1 = df.drop(["Over18", "StandardHours", "EmployeeNumber", "Attrition"], axis=1)

    # One-hot encode all remaining categorical columns, then re-attach the
    # target so it is guaranteed to be the LAST column.
    df1 = pd.get_dummies(df1)
    df1['Attrition'] = df['Attrition']
    print(df1.head(10))
    print(df1.columns)

    # 4.2 Split features / target (target is last by construction above)
    x = df1.iloc[:, :-1]
    y = df1.iloc[:, -1]

    # 4.3 Hold out 20% of the rows for testing
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=108)

    # 4.4 Class-imbalance handling: weight the positive class by the
    # negative/positive ratio instead of resampling (e.g. SMOTE).
    # NOTE(review): this assumes the target is already encoded as 0/1 —
    # a raw 'Yes'/'No' Attrition column would make both counts 0 and raise
    # ZeroDivisionError. Confirm the encoding in the source CSV.
    scale_pos_weight = len(y_train[y_train == 0]) / len(y_train[y_train == 1])
    print(f"scale_pos_weight = {scale_pos_weight:.2f}")

    # 4.5 Standardize features (fit on train only to avoid data leakage)
    transformer = StandardScaler()
    x_train = transformer.fit_transform(x_train)
    x_test = transformer.transform(x_test)

    # 4.6 Build and fit the model. Hyperparameters were chosen via an
    # earlier GridSearchCV pass over max_depth/n_estimators.
    model = lgb.LGBMClassifier(
        max_depth=10,
        n_estimators=100,
        random_state=108,
        scale_pos_weight=scale_pos_weight,
        reg_alpha=0.1,
        reg_lambda=0.1,     # L1/L2 regularization against overfitting
        subsample=0.8,      # row subsampling
    )
    model.fit(x_train, y_train)

    # 4.7 Predict on the test set; keep P(class=1) for AUC / ROC.
    y_pre = model.predict(x_test)
    y_pred_proba = model.predict_proba(x_test)[:, 1]    # second column = positive-class probability

    # 4.8 Evaluation metrics
    print(f"模型的准确率为：{accuracy_score(y_test, y_pre)}")
    print(f"ROC-AUC面积：{roc_auc_score(y_test, y_pred_proba)}")
    print(f"模型的精确率为：{precision_score(y_test, y_pre)}")
    print(f"模型的召回率为：{recall_score(y_test, y_pre)}")
    print(f"模型的f1指标为：{f1_score(y_test, y_pre)}")

    # Make sure the output directory exists before persisting artifacts,
    # otherwise joblib.dump raises FileNotFoundError on a fresh checkout.
    os.makedirs("../model", exist_ok=True)
    joblib.dump(model, "../model/model_LGBM.pkl")           # persist the fitted model
    joblib.dump(transformer, "../model/transformer.pkl")    # persist the fitted scaler

    # Plot the ROC curve from the positive-class probabilities
    fpr, tpr, thresholds = roc_curve(y_test, y_pred_proba)
    plt.plot(fpr, tpr, c="r")
    plt.xlabel("fpr")
    plt.ylabel("tpr")
    plt.show()

    logger.info("完成模型的预测和评估")


if __name__ == '__main__':
    # 1. Read the data file
    df = init()

    # 2. Exploratory data analysis (plots + printed stats; blocks on plt.show)
    eda(df)

    # 3. Feature extraction (currently a no-op placeholder)
    feature_engineering(df)

    # 4. Model training, evaluation and persistence
    model_train(df)
