import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.metrics import roc_curve, roc_auc_score, accuracy_score, precision_score, recall_score, f1_score, \
    classification_report
from sklearn.ensemble import RandomForestClassifier
import matplotlib.pyplot as plt
import joblib
import seaborn as sns
# from utils.log import Logger
import warnings
warnings.filterwarnings('ignore')

# logger = Logger("../log/RF_train.log")


def init(file_path):
    """Load the training data from a CSV file.

    :param file_path: path to a UTF-8 encoded CSV file
    :return: DataFrame holding the raw data
    """
    # logger.info("reading data")
    data = pd.read_csv(file_path, encoding="utf-8")
    # logger.info("finished reading data")
    return data


def eda(df):
    """Exploratory data analysis: box plots of selected features split by Attrition.

    Draws a 2x2 grid of box plots (one per feature), saves the figure as a
    PNG under ../model/ and displays it.

    :param df: DataFrame containing at least the columns 'Attrition', 'Age',
               'MonthlyIncome', 'JobSatisfaction' and 'DistanceFromHome'
    """
    # Work on a copy: the original mutated the caller's frame, turning the
    # numeric Attrition flag into 'Yes'/'No' strings, which would break the
    # downstream training pipeline if eda() were enabled before training.
    df = df.copy()

    # Map the numeric Attrition flag to a categorical label for plotting.
    df['Attrition'] = df['Attrition'].apply(lambda x: 'Yes' if x == 1 else 'No')

    # Hand-picked features of likely importance (simplified selection,
    # no formal correlation test performed here).
    features_to_plot = ['Age', 'MonthlyIncome', 'JobSatisfaction', 'DistanceFromHome']

    # Plot style.
    sns.set(style="whitegrid")

    # 2x2 grid of subplots, one per feature.
    fig, axes = plt.subplots(2, 2, figsize=(15, 10))

    for i, feature in enumerate(features_to_plot):
        row, col = i // 2, i % 2
        # Box plot shows the numeric feature's distribution per Attrition class.
        sns.boxplot(x='Attrition', y=feature, data=df, ax=axes[row, col])
        axes[row, col].set_title(f'{feature} Distribution by Attrition')

        # Anchor the y-axis at 0 so plots share a common baseline.
        ymin, ymax = axes[row, col].get_ylim()
        axes[row, col].set_ylim(0, ymax)

    plt.tight_layout()
    # Bug fix: savefig requires a file name — the original passed a bare
    # directory path ("../model/"), which fails at runtime.
    plt.savefig("../model/eda_boxplots.png")
    plt.show()


def feature_extract(df):
    """Drop uninformative columns and add the CompanyAvgYears feature.

    CompanyAvgYears = floor(TotalWorkingYears / NumCompaniesWorked), i.e. the
    average number of years spent per company; any division by zero is
    mapped to 0.

    :param df: raw DataFrame
    :return: new DataFrame with the engineered feature appended
    """
    # Constant or identifier-like columns carry no predictive signal.
    df = df.drop(columns=["EmployeeNumber", "Over18", "StandardHours"])

    # pandas yields +/-inf (or NaN for 0//0) on division by zero;
    # normalize all of those to 0 before casting back to int.
    avg_years = df["TotalWorkingYears"] // df["NumCompaniesWorked"]
    avg_years = avg_years.replace([float('inf'), -float('inf')], pd.NA)
    df["CompanyAvgYears"] = avg_years.fillna(0).astype(int)

    return df



def RandomForest_train(df):
    """Train a random-forest attrition classifier and persist model + scaler.

    Splits the data 80/20, standardizes features (fitting the scaler on the
    training split only), fits a RandomForestClassifier, prints accuracy and
    ROC-AUC on the held-out split, then saves the estimator and scaler
    under ../model/.

    :param df: feature-engineered DataFrame containing the 'Attrition' target
    """
    # 1- One-hot encode the categorical columns.
    one_hot_df = pd.get_dummies(df)

    # 2- Split target and features BY NAME. The original used positional
    # slicing (iloc[:, 0] / iloc[:, 1:]), which silently trains against the
    # wrong target if 'Attrition' is not the first column after get_dummies.
    y = one_hot_df["Attrition"]
    x = one_hot_df.drop(columns=["Attrition"])

    # 3- Train/test split, then standardize. The scaler is fit on the
    # training split only so no test statistics leak into training.
    x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.2, random_state=211)
    transformer = StandardScaler()
    x_train = transformer.fit_transform(x_train)
    x_test = transformer.transform(x_test)

    # 4- Fit the model (hyperparameters chosen from an earlier grid search).
    estimator = RandomForestClassifier(criterion="entropy", max_depth=20, n_estimators=200, random_state=211)
    estimator.fit(x_train, y_train)

    # 5- Evaluate. predict_proba returns class probabilities (column 1 is
    # the positive class), which allows a custom decision threshold and is
    # what roc_auc_score expects.
    y_predict = estimator.predict(x_test)
    y_predict_proba = estimator.predict_proba(x_test)

    print("准确率：", accuracy_score(y_test, y_predict))
    print(f"AUC面积：{roc_auc_score(y_test, y_predict_proba[:, 1])}")

    # 6- Persist both the model and the scaler: inference must apply the
    # exact same standardization before predicting.
    joblib.dump(estimator, "../model/RF_model.pkl")
    joblib.dump(transformer, "../model/RF_transformer.pkl")


if __name__ == '__main__':
    # logger.info("logging started")
    # Load the raw training data.
    raw_df = init("../data/train.csv")

    # Optional exploratory analysis (disabled by default).
    # eda(raw_df)

    # Feature engineering, then model training.
    train_df = feature_extract(raw_df)
    RandomForest_train(train_df)

    # logger.info("logging finished")
