import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score, roc_curve
from sklearn.model_selection import GridSearchCV
import joblib
import matplotlib
from imblearn.over_sampling import SMOTE

# Configure matplotlib so Chinese text in axis labels/titles renders correctly.
matplotlib.rcParams['font.family'] = 'SimHei'  # SimHei (黑体) provides CJK glyphs
matplotlib.rcParams['axes.unicode_minus'] = False  # render '-' as a real minus sign under CJK fonts


def _evaluate_model(model, x_test, y_test, model_name):
    """Print accuracy, a full classification report and ROC-AUC for *model*
    on the held-out test set.

    Returns (fpr, tpr) from ``roc_curve`` so a caller can draw a ROC plot.
    """
    y_predict = model.predict(x_test)
    proba = model.predict_proba(x_test)[:, 1]
    print(f"===== {model_name} =====")
    # NOTE: accuracy_score is accuracy (准确率), not precision; the original
    # label (精确率) was misleading.
    print(f"准确率：{accuracy_score(y_test, y_predict)}")
    print(f"分类报告：\n{classification_report(y_test, y_predict)}")
    print(f"AUC指标：{roc_auc_score(y_test, proba)}")
    return roc_curve(y_test, proba)[:2]


def _plot_feature_importance(model, feature_names, title):
    """Show a horizontal bar plot of ``model.feature_importances_``,
    sorted from most to least important.

    Requires the model to expose ``feature_importances_`` (tree ensembles do).
    """
    importances = model.feature_importances_
    order = np.argsort(importances)[::-1]
    sns.barplot(x=importances[order], y=feature_names[order])
    plt.xlabel('feature_importances')
    plt.ylabel('features')
    plt.title(title)
    plt.tight_layout()
    plt.show()


def predicting_model_talent_attrition():
    """Evaluate pre-trained attrition models on a properly held-out test set.

    Pipeline: load ../data/test2.csv, drop leaky/constant columns, encode
    categorical features, split train/test, scale, SMOTE-balance the
    *training* split only, then report metrics for the saved random-forest
    and XGBoost models.

    Side effects: reads the CSV and model pickles from disk, prints metrics
    to stdout, and opens matplotlib windows.
    """
    # 1. Load the dataset.
    data = pd.read_csv('../data/test2.csv', encoding='utf-8')

    # 2. Drop columns that are redundant or constant (e.g. 'Over18'),
    # plus the target column itself.
    drop_cols = [
        'YearsInCurrentRole',
        'YearsWithCurrManager',
        'PerformanceRating',
        'Over18',
    ]
    x = data.drop(columns=drop_cols + ['Attrition'])
    y = data['Attrition']

    # 3. Label-encode only categorical (object-dtype) columns. The original
    # encoded *every* column, which destroys the scale and ordering of
    # numeric features such as Age or MonthlyIncome.
    for col in x.select_dtypes(include='object').columns:
        x[col] = LabelEncoder().fit_transform(x[col])

    # 4. Split BEFORE scaling and resampling to prevent data leakage:
    # the scaler must be fit on training data only, and SMOTE must never
    # inject synthetic samples into the test set (doing so inflates every
    # reported metric).
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=42, stratify=y
    )

    # 5. Standardize: fit on the training split, apply the same transform
    # to the test split.
    scaler = StandardScaler()
    x_train_scaled = scaler.fit_transform(x_train)
    x_test_scaled = scaler.transform(x_test)

    # 6. Balance the classes on the training split only.
    smote = SMOTE(random_state=42, k_neighbors=4)
    x_train_res, y_train_res = smote.fit_resample(x_train_scaled, y_train)
    # x_train_res / y_train_res are what a retraining step would consume;
    # the pre-trained models below are evaluated on the untouched test set.
    del x_train_res, y_train_res

    # 7.1 Random forest (pre-trained).
    rf = joblib.load("../model/rf_model.pkl")
    _evaluate_model(rf, x_test_scaled, y_test, "RandomForest")
    # Original titled this plot "LogisticRegression" although the loaded
    # model is the random forest — corrected.
    _plot_feature_importance(rf, x.columns, 'RandomForest-Feature_Importance')

    # 7.2 XGBoost (pre-trained).
    xgb = joblib.load("../model/xgb_model.pkl")
    _evaluate_model(xgb, x_test_scaled, y_test, "XGBoost")

# Script entry point: run the full evaluation pipeline when executed directly.
if __name__ == '__main__':
    predicting_model_talent_attrition()
