import warnings

import matplotlib
# Fix: select the backend BEFORE pyplot is imported — calling matplotlib.use()
# after `from matplotlib import pyplot` may be ignored on older matplotlib
# versions, so the TkAgg request must come first.
matplotlib.use('TkAgg')
from matplotlib import pyplot as plt

import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
from imblearn.over_sampling import RandomOverSampler, SMOTE, ADASYN
from scipy import stats
from sklearn.feature_selection import chi2, f_classif, mutual_info_classif
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import (accuracy_score, classification_report, confusion_matrix,
                             f1_score, make_scorer, precision_score, recall_score,
                             roc_auc_score)
from sklearn.model_selection import GridSearchCV, StratifiedKFold, train_test_split
from sklearn.preprocessing import LabelEncoder, MinMaxScaler

# Suppress warning output (e.g. sklearn convergence / deprecation warnings).
warnings.filterwarnings('ignore')

# Global seed shared by samplers, splitters and models for reproducibility.
random_state = 4
pd.set_option('display.max_columns', 9)  # use None to show all columns
pd.set_option('display.expand_frame_repr', False)  # do not wrap wide frames
pd.set_option('display.max_rows', 30)  # use None to show all rows
def analyze_feature_correlations(X, y, numerical_cols, categorical_cols, random_state=None):
    """Score how strongly each feature relates to a binary label.

    Three complementary measures are computed:
      1. ANOVA F-test on the numerical columns (larger F => class means differ more).
      2. Chi-square test on label-encoded categorical columns (larger => stronger association).
      3. Mutual information on all columns (numerical min-max scaled to [0, 1] first).

    Args:
        X: feature DataFrame containing at least `numerical_cols` + `categorical_cols`.
        y: binary label series aligned with X.
        numerical_cols: names of numeric feature columns.
        categorical_cols: names of object/categorical feature columns.
        random_state: optional seed forwarded to mutual_info_classif so MI
            estimates are reproducible (default None preserves prior behavior).

    Returns:
        dict with DataFrames under keys 'anova', 'chi2' and 'mutual_info'.
    """
    results = {}
    # 1. ANOVA F-test for numerical features.
    f_scores, p_values = f_classif(X[numerical_cols], y)
    results['anova'] = pd.DataFrame(
        {'Feature': numerical_cols, 'F-score': f_scores, 'p-value': p_values})
    # 2. Chi-square test; categorical columns are integer-encoded column by column.
    X_cat_encoded = X[categorical_cols].apply(LabelEncoder().fit_transform)
    chi2_scores, chi2_p = chi2(X_cat_encoded, y)
    results['chi2'] = pd.DataFrame(
        {'Feature': categorical_cols, 'Chi2-score': chi2_scores, 'p-value': chi2_p})
    # 3. Mutual information over all features. Keep X's index on the scaled frame
    # so the concat below aligns row-for-row even if X has a non-default index.
    X_num_scaled_df = pd.DataFrame(MinMaxScaler().fit_transform(X[numerical_cols]),
                                   columns=numerical_cols, index=X.index)
    X_processed = pd.concat([X_num_scaled_df, X_cat_encoded], axis=1)
    mi_scores = mutual_info_classif(
        X_processed, y,
        discrete_features=[False] * len(numerical_cols) + [True] * len(categorical_cols),
        random_state=random_state)
    results['mutual_info'] = pd.DataFrame(
        {'Feature': X_processed.columns, 'MI-score': mi_scores})
    return results

def feature_analysis(path, target_name):
    """Load a CSV, print feature/label correlation diagnostics, and return the
    curated feature set used by the models.

    Interpretation guide:
      1. Numerical features: ANOVA F — larger means clearer class separation (F >= 10 ~ strong).
      2. Categorical features: chi-square — larger means stronger association.
      3. All features: mutual information — larger means stronger dependence on the label.

    Args:
        path: CSV file path.
        target_name: name of the binary label column.

    Returns:
        (X, y, final_feature_list, categorical_features) where X contains only
        the hand-curated `final_feature_list` columns.
    """
    data = pd.read_csv(path)
    data.info()  # prints schema/dtypes to stdout (kept deliberately)
    numerical_features = [c for c in data.columns if data[c].dtype != 'object' and c != target_name]
    categorical_features = [c for c in data.columns if data[c].dtype == 'object']
    X = data.drop(target_name, axis=1)
    y = data[target_name]
    print(f"\n数值型特征: {numerical_features}")
    print(f"分类型特征: {categorical_features}")
    results = analyze_feature_correlations(X, y, numerical_features, categorical_features)
    # Build the combined summary in one pass: index the score tables by feature
    # name and accumulate plain dicts, then construct a single DataFrame.
    # (The previous pd.concat-inside-a-loop was quadratic.)
    anova_scores = results['anova'].set_index('Feature')['F-score']
    chi2_scores = results['chi2'].set_index('Feature')['Chi2-score']
    mi_scores = results['mutual_info'].set_index('Feature')['MI-score']
    rows = []
    for feature in numerical_features:
        rows.append({'Feature': feature, 'Type': 'Numerical',
                     'ANOVA_F-score': anova_scores[feature],
                     'MI-score': mi_scores[feature]})
    for feature in categorical_features:
        rows.append({'Feature': feature, 'Type': 'Categorical',
                     'Chi2-score': chi2_scores[feature],
                     'MI-score': mi_scores[feature]})
    final_results = pd.DataFrame(rows)
    print("\n特征相关性综合评估:", final_results.sort_values(by='MI-score', ascending=False))
    # Hand-curated feature list: drops ['EmployeeNumber', 'StandardHours', 'Over18'].
    # Removing Education, NumCompaniesWorked or YearsWithCurrManager was tried and
    # did not improve results.
    final_feature_list = ['Age', 'DistanceFromHome', 'EnvironmentSatisfaction', 'JobInvolvement', 'JobLevel', 'JobSatisfaction', 'MonthlyIncome', 'NumCompaniesWorked', 'PercentSalaryHike', 'RelationshipSatisfaction', 'StockOptionLevel', 'TotalWorkingYears', 'TrainingTimesLastYear', 'WorkLifeBalance', 'YearsAtCompany', 'YearsInCurrentRole', 'YearsSinceLastPromotion', 'YearsWithCurrManager','BusinessTravel', 'Department', 'EducationField', 'Gender', 'JobRole', 'MaritalStatus', 'OverTime','PerformanceRating','Education']
    # Keep only the categorical features that survived the curation above.
    categorical_features = [c for c in final_feature_list if c in categorical_features]
    return data[final_feature_list], y, final_feature_list, categorical_features

def fix_model1(X_train, y_train, X_test, y_test):
    """Oversample the training set, grid-search a LogisticRegression, and
    report AUC/accuracy on the hold-out split.

    Args:
        X_train, y_train: training features/labels (will be resampled).
        X_test, y_test: hold-out features/labels used only for evaluation.

    Returns:
        (fitted GridSearchCV, positive-class probabilities on X_test,
         hard class predictions on X_test).
    """
    # Balance classes: random duplication first, then SMOTE interpolation.
    ros = RandomOverSampler(random_state=random_state)
    X_train, y_train = ros.fit_resample(X_train, y_train)
    smote = SMOTE(random_state=random_state)
    X_train, y_train = smote.fit_resample(X_train, y_train)
    # Fix: seed the CV splitter so grid-search results are reproducible
    # (fix_model2 already seeds its splitter).
    cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=random_state)
    base = LogisticRegression(max_iter=1000)
    param_grid = {
        'C': [0.1, 0.5, 2, 3, 4, 5, 6],
        'penalty': ['l1', 'l2'],
        'solver': ['liblinear'],  # liblinear supports both l1 and l2
        'class_weight': ['balanced', None]
    }
    model = GridSearchCV(base, param_grid, cv=cv, scoring='roc_auc', n_jobs=-1)
    model.fit(X_train, y_train)
    y_pre2 = model.predict(X_test)
    y_pre = model.predict_proba(X_test)[:, 1]
    print('模型1测试的auc:', roc_auc_score(y_test, y_pre))
    # Convert probabilities to class labels at the default 0.5 threshold.
    y_pred = (y_pre >= 0.5).astype(int)
    print('模型1测试的acc:', accuracy_score(y_test, y_pred))
    return model, y_pre, y_pre2


def fix_model2(X_train, y_train, X_test, y_test):
    """Balance the training data with ADASYN, grid-search an XGBoost
    classifier on AUC, and report AUC/accuracy on the hold-out split.

    Returns:
        (fitted GridSearchCV, positive-class probabilities on X_test,
         hard class predictions on X_test).
    """
    # ADASYN adaptively synthesizes more minority samples in hard regions.
    X_bal, y_bal = ADASYN(random_state=6).fit_resample(X_train, y_train)
    splitter = StratifiedKFold(n_splits=5, shuffle=True, random_state=random_state)
    search_space = {
        'n_estimators': [210, 220, 150, 200, 130],
        'learning_rate': [0.2, 0.25, 0.27],
        'max_depth': [5, 7, 6, 9],
        'reg_alpha': [None, 0.5, 0.9],
        'reg_lambda': [None, 0.5, 0.9],
    }
    model = GridSearchCV(xgb.XGBClassifier(random_state=6), search_space,
                         cv=splitter, scoring='roc_auc', n_jobs=-1)
    model.fit(X_bal, y_bal)
    hard_preds = model.predict(X_test)
    proba = model.predict_proba(X_test)[:, 1]
    print('模型2测试的auc:', roc_auc_score(y_test, proba))
    # Probabilities -> class labels at the default 0.5 threshold.
    thresholded = (proba >= 0.5).astype(int)
    print('模型2测试的acc:', accuracy_score(y_test, thresholded))
    return model, proba, hard_preds

if __name__ == '__main__':
    # Load the training set and an external test set; each call also prints
    # its correlation diagnostics.
    X_train, y_train, feature_list, c_features = feature_analysis('../data/train.csv', 'Attrition')
    X_test_f, y_test_f, feature_list2, c_features2 = feature_analysis('../data/test2.csv', 'Attrition')
    le = LabelEncoder()
    if c_features:
        for i in c_features:
            # NOTE(review): one encoder instance is re-fit per column on train,
            # then applied to test — `transform` raises on categories unseen in
            # train; confirm the test file shares the train category sets.
            X_train[i] = le.fit_transform(X_train[i])
            X_test_f[i] = le.transform(X_test_f[i])
    # Hold out 10% of train as a validation split for the stacking meta-model.
    X_train, X_test, y_train, y_test = train_test_split(X_train, y_train, test_size=0.1, stratify=y_train, random_state=random_state)
    model1,pred1,pred_pre_1 = fix_model1(X_train, y_train, X_test, y_test)
    model2,pred2,pred_pre_2 = fix_model2(X_train, y_train, X_test, y_test)
    # Stacking: fit a logistic-regression meta-model on the two base models'
    # validation-split probabilities.
    X_val_new = np.column_stack([pred1, pred2])  # combine base-model predictions
    meta_model = LogisticRegression()
    meta_model.fit(X_val_new, y_test)
    # Evaluate base models and the stacked model on the external test file.
    y_pre_f1 = model1.predict_proba(X_test_f)[:, 1]
    y_pre_f2 = model2.predict_proba(X_test_f)[:, 1]
    X_pre_new = np.column_stack([y_pre_f1, y_pre_f2])
    print('模型1的auc:',roc_auc_score(y_test_f, y_pre_f1))
    y_pre_f1 = (y_pre_f1 >= 0.5).astype(int)
    print('模型1的acc:', accuracy_score(y_test_f, y_pre_f1))
    print('模型2的auc:',roc_auc_score(y_test_f, y_pre_f2))
    y_pre_f2 = (y_pre_f2 >= 0.5).astype(int)
    print('模型2的acc:', accuracy_score(y_test_f, y_pre_f2))
    y_pred_f = meta_model.predict_proba(X_pre_new)[:, 1]  # positive-class probability
    print('最终模型的auc:',roc_auc_score(y_test_f, y_pred_f))
    y_pred_f = (y_pred_f >= 0.5).astype(int)
    print('最终模型的acc:', accuracy_score(y_test_f, y_pred_f))
    print("最终模型的混淆矩阵:\n", confusion_matrix(y_test_f, y_pred_f))
    # meta_model2 = GridSearchCV(meta_model, {}, cv=4, scoring='roc_auc', n_jobs=-1)
    # meta_model2.fit(X_val_new2, y_test)
    # print(meta_model2.best_params_)
    # y_pre_f1 = model1.predict(X_test_f)
    # print('y_pre:\n', classification_report(y_test_f, y_pre_f1))
    # print('模型1', roc_auc_score(y_test_f, y_pre_f1))
    # y_pre_f2 = model2.predict(X_test_f)
    # print('y_pre:\n', classification_report(y_test_f, y_pre_f2))
    # print('模型2', roc_auc_score(y_test_f, y_pre_f2))
    # X_pre_new = np.column_stack([y_pre_f1, y_pre_f2])
    # y_pred_f = meta_model2.predict(X_pre_new)
    # print('y_pre:\n', classification_report(y_test_f, y_pred_f))
    # print('最终模型', roc_auc_score(y_test_f, y_pred_f))
# RandomOverSampler: balances classes by simply duplicating existing minority samples.
# SMOTE: synthesizes new minority samples by linear interpolation in feature space.
# ADASYN: adaptively generates samples by learning difficulty — more in hard-to-classify regions.



