import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,roc_auc_score,roc_curve
from sklearn.model_selection import GridSearchCV
import joblib
import matplotlib
# Configure matplotlib so figures can render Chinese text.
matplotlib.rcParams['font.family'] = 'SimHei'  # SimHei: a font with CJK glyph coverage
matplotlib.rcParams['axes.unicode_minus'] = False  # keep '-' rendering correctly under a CJK font
def predicting_model_talent_attrition():
    """Evaluate the persisted attrition model on a held-out test split.

    Loads the encoded employee dataset, reproduces the train/test split
    and feature scaling used at training time, loads the saved
    random-forest model, prints accuracy / classification report /
    ROC-AUC, and plots the ROC curve plus feature importances.

    Returns:
        None. Output goes to stdout and a matplotlib window.
    """
    # 1. Load the dataset (path relative to this script's location).
    data = pd.read_csv('../../data/test_encoded.csv', encoding='utf-8')

    # 2. Drop identifier/constant columns with no predictive signal.
    data = data.drop(['EmployeeNumber', 'StandardHours', 'Over18'], axis=1)

    # 3. Separate features from the target label.
    x = data.drop(['Attrition'], axis=1)
    y = data['Attrition']

    # 4. Split train/test. random_state=42 must match the training
    #    script so the test fold here is data the model never saw.
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=42)

    # 5. Standardize features: fit the scaler on the training fold only,
    #    then apply it to the test fold (avoids test-set leakage).
    scaler = StandardScaler()
    scaler.fit(x_train)
    x_test_scaled = scaler.transform(x_test)

    # 6. Load the persisted random-forest model and evaluate it.
    es = joblib.load("../models/best_model.pkl")
    y_predict = es.predict(x_test_scaled)
    # Compute positive-class probabilities once and reuse them for the
    # AUC score and the ROC curve (previously recomputed three times).
    y_proba = es.predict_proba(x_test_scaled)[:, 1]
    auc = roc_auc_score(y_test, y_proba)
    # NOTE(review): the original labels were wrong — accuracy_score was
    # printed as "精确率" (precision) and the classification report as
    # "召回率" (recall); corrected below.
    print(f"Accuracy: {accuracy_score(y_test, y_predict)}")
    print(f"Classification report:\n{classification_report(y_test, y_predict)}")
    print(f"ROC-AUC: {auc}")
    fpr, tpr, _ = roc_curve(y_test, y_proba)

    # ROC curve (top subplot).
    plt.figure(figsize=(10, 10))
    plt.subplot(2, 1, 1)
    plt.plot(fpr, tpr, label='ROC curve (area = %0.2f)' % auc)
    plt.plot([0, 1], [0, 1], 'k--')  # chance diagonal for reference
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('ROC Curve')
    plt.legend(loc="lower right")

    # Feature-importance bar chart (bottom subplot), most important first.
    plt.subplot(2, 1, 2)
    feature_importances = es.feature_importances_
    sorted_indices = np.argsort(feature_importances)[::-1]
    sorted_feature_importances = feature_importances[sorted_indices]
    sorted_columns = x.columns[sorted_indices]
    sns.barplot(x=sorted_feature_importances, y=sorted_columns)
    plt.xlabel('feature_importances')
    plt.ylabel('categorical_cols')
    plt.title('trees-Feature_Importance')
    plt.tight_layout()
    plt.show()

# Script entry point. (Removed four no-op bare string-literal statements
# — leftover "测试git" markers — that executed as dead expressions.)
if __name__ == '__main__':
    predicting_model_talent_attrition()