import matplotlib.pyplot as plt
import pandas as pd
import seaborn as sns
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_selection import chi2, SelectKBest
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder

from src.zzm.utils.common import data_preprocessing

# Use SimHei so that the Chinese labels/titles in the plots render correctly;
# bump the default font size for readability in the saved figures.
plt.rcParams['font.family'] = 'SimHei'
plt.rcParams['font.size'] = 15


def feature_proprecessing(data):
    """
    Data preprocessing for feature selection.

    Works on a copy of *data* (the input DataFrame is never mutated):
    drops uninformative columns, then integer-encodes every remaining
    object (string) column.

    :param data: source DataFrame; must contain the columns
        'Over18', 'EmployeeNumber' and 'StandardHours' (KeyError otherwise)
    :return: a new DataFrame with the three columns removed and all
        object columns replaced by integer codes (0..n_categories-1,
        assigned in sorted order of the category values)
    """
    train_data = data.copy()

    # 1. Drop columns that carry no signal (constant or pure-ID columns).
    train_data = train_data.drop(columns=['Over18', 'EmployeeNumber', 'StandardHours'])

    # 2. Label-encode string columns. NOTE: the original comment called this
    # one-hot encoding (热编码), but it is label encoding. pd.Categorical
    # assigns codes by sorted unique values, which matches what
    # sklearn's LabelEncoder produced here — LabelEncoder itself is meant
    # for target labels, not features, per the sklearn docs.
    for col_name in train_data.select_dtypes(include='object').columns:
        train_data[col_name] = pd.Categorical(train_data[col_name]).codes

    return train_data


def feature_choise_random_forest(data, feature_names=None):
    """
    Approach 1: feature selection - feature importance plot (random forest).

    Trains a RandomForestClassifier on 80% of the data and plots each
    feature's importance as a horizontal bar chart, saved to
    '../output/random_forest_feature.png' and shown on screen.

    :param data: source DataFrame; must contain an 'Attrition' target column
    :param feature_names: optional list of column names to restrict the
        candidate features; None means use every preprocessed column
    :return: None (side effects: saves and shows the plot)
    """
    # 1. Preprocess and pick features / target.
    train_data = feature_proprecessing(data)
    y = train_data.pop('Attrition')
    x = train_data if feature_names is None else train_data[feature_names]

    # 2. Split the data; only the training portion is used here, the
    # held-out 20% is discarded (kept for parity with the training scripts).
    x_train, _, y_train, _ = train_test_split(x, y, test_size=0.2, random_state=25)

    # 3. Fit the model and read out the impurity-based feature importances.
    model = RandomForestClassifier(random_state=25)
    model.fit(x_train, y_train)
    importance = model.feature_importances_

    df = pd.DataFrame({
        'feature': x.columns,
        'importance': importance
    })
    df.sort_values(by=['importance'], inplace=True, ascending=False)

    # 4. Plot a horizontal bar chart, most important feature on top.
    plt.figure(figsize=(60, 40))
    sns.barplot(x='importance', y='feature', data=df)
    plt.title('特征重要性排序')
    plt.xticks(fontsize=30, rotation=90)  # x-axis tick font size
    plt.yticks(fontsize=30)  # y-axis tick font size
    plt.savefig('../output/random_forest_feature.png')
    plt.show()


def feature_choise_kf(data, feature_names=None):
    """
    Approach 2: feature selection - feature importance via chi-squared test.

    Runs a chi2 test between every candidate feature and the 'Attrition'
    label on 80% of the data, then prints the chi2 score, p-value and a
    coarse strength label for each feature, sorted by chi2 score.

    :param data: source DataFrame; must contain an 'Attrition' target column
    :param feature_names: optional list of column names to restrict the
        candidate features; None means use every preprocessed column
    :return: None (side effect: prints the result table and feature lists)
    """
    # 1. Preprocess and pick features / target.
    train_data = feature_proprecessing(data)
    y = train_data.pop('Attrition')
    x = train_data if feature_names is None else train_data[feature_names]

    # 2. Split the data; only the training portion is scored below.
    x_train, _, y_train, _ = train_test_split(x, y, test_size=0.2, random_state=25)

    # 3. Chi-squared test: SelectKBest with k='all' scores every feature
    # against the label without actually dropping any of them.
    selector = SelectKBest(chi2, k='all')
    selector.fit(x_train, y_train)

    chi2_scores = selector.scores_
    p_values = selector.pvalues_

    # BUG FIX: the columns here must be x.columns (the columns that were
    # actually scored), not train_data.columns — the two differ whenever
    # feature_names is given, which previously misaligned/crashed this frame.
    results = pd.DataFrame({
        '特征': x.columns,
        '卡方值': chi2_scores,
        'p值': p_values,
        '相关性强度': ['强' if p < 0.001 else '中' if p < 0.05 else '弱' if p < 0.1 else '无' for p in p_values]
    })

    # Sort by chi2 score, descending; drop=True keeps the stray old index
    # out of the printed table.
    results = results.sort_values('卡方值', ascending=False).reset_index(drop=True)
    print("\n卡方检验结果:")
    print(results)
    print('-' * 31)
    print(results[results['相关性强度'] != '无']['特征'].tolist())
    print()
    print(results[results['相关性强度'] == '无']['特征'].tolist())


if __name__ == '__main__':
    # Load and preprocess the raw training data, then run the chi-squared
    # feature ranking. (Removed an unused `feature_names_class` variable.)
    data = data_preprocessing('../../../data/raw/train.csv')
    feature_choise_kf(data)
    # Example: rank only a candidate subset with the random-forest approach:
    # feature_names = ['Education', 'NumCompaniesWorked', 'RelationshipSatisfaction', 'Gender', 'WorkLifeBalance', 'Department', 'PerformanceRating', 'EducationField', 'BusinessTravel', 'TrainingTimesLastYear', 'PercentSalaryHike']
    # feature_choise_random_forest(data, feature_names)