import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder, StandardScaler
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
import xgboost as xgb
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score,roc_auc_score,roc_curve
from sklearn.model_selection import GridSearchCV
import joblib
import matplotlib
# Configure matplotlib so non-ASCII (Chinese) plot labels render correctly.
matplotlib.rcParams['font.family'] = 'SimHei'  # SimHei font supports CJK glyphs
matplotlib.rcParams['axes.unicode_minus'] = False  # render '-' as a minus sign, not a box

def all_predicting_talent_attrition():
    """Train, evaluate and persist a random-forest attrition model.

    Pipeline: load the training CSV, drop non-informative columns,
    label-encode the categorical (object-dtype) columns, split into
    train/test sets, standardize features, grid-search a
    RandomForestClassifier, report metrics, plot feature importances
    and dump the best estimator to disk.

    Side effects:
        - reads '../../data/train.csv'
        - prints evaluation metrics to stdout
        - shows a matplotlib feature-importance figure
        - writes '../models/randomForest_best_model.pkl'
    """
    # 1. Load the data set.
    data = pd.read_csv('../../data/train.csv', encoding='utf-8')

    # 2. Drop identifier / constant-valued columns that carry no signal.
    data = data.drop(['EmployeeNumber', 'StandardHours', 'Over18'], axis=1)

    # Label-encode only genuinely categorical (object-dtype) columns.
    # BUG FIX: the previous hard-coded "categorical" list also ran
    # LabelEncoder over numeric columns (Age, MonthlyIncome, ...), which
    # silently rank-transformed them; numeric columns are now left as-is
    # for the scaler below.
    encoder = LabelEncoder()
    for col in data.select_dtypes(include='object').columns:
        data[col] = encoder.fit_transform(data[col])

    # 3. Split into features and label.
    x = data.drop(['Attrition'], axis=1)
    y = data['Attrition']

    # 4. Train/test split (fixed seed for reproducibility).
    x_train, x_test, y_train, y_test = train_test_split(
        x, y, test_size=0.2, random_state=42)

    # 5. Standardize features. The scaler is fit on the training set only,
    #    so no test-set statistics leak into training.
    scaler = StandardScaler()
    x_train_scaled = scaler.fit_transform(x_train)
    x_test_scaled = scaler.transform(x_test)

    # 6. Hyper-parameter tuning of the random forest via grid search,
    #    optimizing ROC-AUC with 3-fold cross-validation.
    param_grid = {
        'n_estimators': [100, 200, 300],
        'max_depth': [None, 10, 20, 30],
        'min_samples_split': [2, 5],
        'min_samples_leaf': [1, 2],
    }
    clf = RandomForestClassifier(random_state=42)
    grid_search = GridSearchCV(estimator=clf, param_grid=param_grid,
                               cv=3, scoring='roc_auc')
    grid_search.fit(x_train_scaled, y_train)
    best_model = grid_search.best_estimator_

    # 7. Evaluate on the held-out test set.
    # BUG FIX: the printed labels previously mismatched the metrics —
    # accuracy was labeled "精确率" (precision) and the full classification
    # report was labeled "召回率" (recall).
    y_predict = best_model.predict(x_test_scaled)
    y_proba = best_model.predict_proba(x_test_scaled)[:, 1]
    print(f"准确率：{accuracy_score(y_test, y_predict)}")
    print(f"分类报告：{classification_report(y_test, y_predict)}")
    print(f"AUC指标：{roc_auc_score(y_test, y_proba)}")

    # 8. Feature-importance plot.
    # BUG FIX: labels must come from x.columns (the actual model inputs);
    # the old hard-coded 27-name list did not match the feature matrix fed
    # to the model, so the bars were mislabeled / length-mismatched.
    feature_importances = pd.Series(
        best_model.feature_importances_,
        index=x.columns,
    ).sort_values(ascending=False)
    sns.barplot(x=feature_importances.values, y=feature_importances.index)
    plt.xlabel('feature_importances')
    plt.ylabel('features')
    plt.title('Feature Importance')
    plt.tight_layout()
    plt.show()

    # 9. Persist the tuned model.
    # BUG FIX: the file was previously named "logisticRegression_best_model"
    # even though the saved estimator is a random forest.
    joblib.dump(best_model, '../models/randomForest_best_model.pkl')

# Script entry point: run the full training/evaluation pipeline.
if __name__ == '__main__':
    all_predicting_talent_attrition()