import os
import matplotlib
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import datetime
from sklearn.preprocessing import StandardScaler
from utils.log import Logger
from utils.common import data_preprocessing
from features.feature_engineering import feature_engineering
from xgboost import XGBRegressor
from sklearn.model_selection import train_test_split, GridSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import mean_squared_error, mean_absolute_error, root_mean_squared_error
from sklearn.metrics import accuracy_score, roc_auc_score, confusion_matrix, classification_report
import joblib

matplotlib.use('TkAgg')  # or 'Qt5Agg'
plt.rcParams['font.family'] = 'SimHei'  # SimHei font so Chinese glyphs render in plots
plt.rcParams['font.size'] = 15


# 1. Employee-attrition model class: configures the log file and loads the data source
class EmployeeAttritionModel:
    """Employee-attrition model: sets up a per-run logger and loads the data source."""

    def __init__(self, file_path):
        """Create a timestamped logger and read the source CSV.

        Args:
            file_path: path to the CSV file to load into ``self.data_source``.
        """
        # Timestamped log-file name, e.g. train_20240101123000
        run_stamp = datetime.datetime.now().strftime('%Y%m%d%H%M%S')
        # Logger writes next to the project root (relative path '../')
        self.logfile = Logger('../', f'train_{run_stamp}').get_logger()
        # Emit a creation marker so the log confirms initialization ran
        self.logfile.info("人才流失模型类已经被创建")
        # Raw data source as a DataFrame
        self.data_source = pd.read_csv(file_path)


# 2. Logistic-regression model training
def Logistic_Regression_Model(feature, target, logfile):
    """Grid-search a LogisticRegression, log its evaluation, and return it.

    Args:
        feature: feature matrix (DataFrame/array); copied, never mutated.
        target: binary label vector; copied, never mutated.
        logfile: logger object exposing ``info``.

    Returns:
        tuple: ``(best_model, scaler)`` — the best estimator found by the
        grid search and the fitted StandardScaler, so callers can reuse
        them instead of re-typing the best hyperparameters by hand.
        (Backward compatible: the previous version returned None and
        existing callers ignore the return value.)
    """
    # 1. Copy inputs so the caller's data is never mutated.
    feature_data = feature.copy()
    target_data = target.copy()
    # 2. Stratified 80/20 split keeps the class ratio stable in both sets.
    x_train, x_test, y_train, y_test = train_test_split(
        feature_data,
        target_data,
        test_size=0.2,
        random_state=23,
        stratify=target_data
    )
    # 3. Standardize — fit on the training set only to avoid leakage.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. Base estimator: 'saga' supports both 'l1' and 'l2' penalties
    #    searched in the grid below.
    estimator = LogisticRegression(solver='saga', max_iter=5000, random_state=23)
    # 5. Hyperparameter grid.
    param_grid = {
        'C': [0.01, 0.1, 1, 10],             # inverse regularization strength
        'class_weight': [None, 'balanced'],  # class-imbalance handling
        'penalty': ['l1', 'l2']              # regularization type
    }
    # 6. Grid search with 5-fold CV; AUC suits imbalanced data better than accuracy.
    grid_search = GridSearchCV(
        estimator=estimator,
        param_grid=param_grid,
        cv=5,
        scoring='roc_auc',
        n_jobs=-1,
        verbose=1
    )
    # 7. Train across the grid.
    grid_search.fit(x_train, y_train)
    # 8. Log the best parameters and cross-validated score.
    logfile.info(f"逻辑回归最佳参数: {grid_search.best_params_}")
    logfile.info(f"逻辑回归最佳AUC: {grid_search.best_score_:.4f}")
    # 9. Evaluate the best model on the held-out test set.
    best_model = grid_search.best_estimator_
    y_pred = best_model.predict(x_test)
    y_prob = best_model.predict_proba(x_test)[:, 1]
    # 10. Test-set metrics.
    acc = accuracy_score(y_test, y_pred)
    auc = roc_auc_score(y_test, y_prob)
    cm = confusion_matrix(y_test, y_pred)
    report = classification_report(y_test, y_pred)
    logfile.info(f"测试集准确率: {acc:.4f}")
    logfile.info(f"测试集AUC: {auc:.4f}")
    logfile.info(f"测试集混淆矩阵:\n{cm}")
    logfile.info(f"测试集分类报告:\n{report}")
    # Return the winning model and the fitted scaler for downstream use
    # (previously both were discarded and params were hand-copied in main).
    return best_model, transfer


def Model_save(estimator, feature, target, model_path, logfile=None):
    """Fit *estimator* on an 80/20 split, log error metrics, and persist it.

    Args:
        estimator: scikit-learn style estimator with ``fit``/``predict``.
        feature: feature matrix (DataFrame/array); copied, never mutated.
        target: label vector; copied, never mutated.
        model_path: destination path for the serialized model (joblib).
        logfile: optional logger with ``info``; when None, logging is
            skipped. (Fix: the previous version called ``logfile.info``
            unconditionally and crashed with AttributeError whenever the
            default None was used.)
    """
    # 1. Copy inputs so the caller's data is never mutated.
    feature_data = feature.copy()
    target_data = target.copy()
    # 2. Stratified 80/20 split; same seed as the training routine so the
    #    held-out set matches across functions.
    x_train, x_test, y_train, y_test = train_test_split(
        feature_data,
        target_data,
        test_size=0.2,
        random_state=23,
        stratify=target_data
    )
    # 3. Standardize — fit on the training set only.
    # NOTE(review): the fitted scaler is NOT saved below, so inference on raw
    # features will not match training — consider persisting a Pipeline.
    transfer = StandardScaler()
    x_train = transfer.fit_transform(x_train)
    x_test = transfer.transform(x_test)
    # 4. Train and predict on the held-out split.
    estimator.fit(x_train, y_train)
    y_predict = estimator.predict(x_test)
    # 5. Log error metrics (regression-style; for a classifier these are
    #    computed over the 0/1 predictions).
    if logfile is not None:
        logfile.info(f"MSE:{mean_squared_error(y_test, y_predict)}")
        logfile.info(f"MAE:{mean_absolute_error(y_test, y_predict)}")
        logfile.info(f"RMSE:{root_mean_squared_error(y_test, y_predict)}")
    # 6. Ensure the destination directory exists, then save the model.
    model_dir = os.path.dirname(model_path)
    if model_dir:
        os.makedirs(model_dir, exist_ok=True)
    joblib.dump(estimator, model_path)
    if logfile is not None:
        logfile.info(f"模型保存成功，保存路径：{os.path.abspath(model_path)}")


# Smoke test / training entry point
if __name__ == '__main__':
    # 1. Create the model object (logs a creation message and loads the CSV)
    eam = EmployeeAttritionModel('../data/processed/train.csv')
    # eam.data_source.info()
    # 2. Build the feature matrix and the target column from the raw data
    feature_df, target_df = feature_engineering(eam.data_source, eam.logfile)
    # 3. Train the logistic-regression model (grid search + test-set evaluation)
    Logistic_Regression_Model(feature_df, target_df, eam.logfile)
    # 3.1. Rebuild the best logistic-regression model for saving
    # NOTE(review): these hyperparameters appear to be hand-copied from an
    # earlier grid-search run's best_params_ — confirm they match the latest
    # logged output before saving.
    lr_estimator = LogisticRegression(
        C=0.1,
        class_weight=None,
        penalty='l2',
        solver='saga',
        max_iter=5000,
        random_state=23
    )
    # Model_save(
    #     lr_estimator,
    #     feature_df,
    #     target_df,
    #     'models/LogisticRegressionModel.pkl',
    #     eam.logfile
    # )