from data_preprocess_jl import have_train_preprocess_data, train_data_analysis_encode_save, load_train_preprocess_data
from sklearn.model_selection import GridSearchCV, StratifiedKFold
from sklearn.metrics import roc_auc_score, make_scorer
from xgboost import XGBClassifier
from sklearn.utils import class_weight
import joblib
import shap
from utils.log_jl import Logger
from datetime import datetime
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# Configure Matplotlib so Chinese glyphs and the minus sign render correctly.
plt.rcParams.update({
    'font.sans-serif': ['SimHei'],   # font with Chinese glyph coverage
    'axes.unicode_minus': False,     # render '-' instead of the Unicode minus
})

class Train_XGBoost_TL(object):
    """Train an XGBoost classifier via grid search, analyse it with SHAP,
    and optionally retrain a reduced model after dropping low-importance
    features.

    Typical call order:
        grain_search_train_save_xgb_model() -> shap_graph() ->
        get_low_shap_features() -> retrain_model_with_reduced_features()
    """

    def __init__(self):
        # Build the preprocessed training set on first use, then load it.
        if have_train_preprocess_data() is False:
            train_data_analysis_encode_save()
        data = load_train_preprocess_data()
        # Column 0 is the label; the remaining columns are the features.
        self.x_data = data.iloc[:, 1:]
        self.y_data = data.iloc[:, 0]
        self.train_model = None    # set by grain_search_train_save_xgb_model()
        self.reduced_model = None  # set by retrain_model_with_reduced_features()
        # Configure a timestamped log file for this training run.
        logfile_name = 'train' + str(datetime.now().strftime('%Y%m%d%H%M%S'))
        self.logfile = Logger('../', logfile_name).get_logger()

    def _require_trained_model(self):
        """Fail fast with a clear message when no fitted model is available.

        :raises RuntimeError: if grain_search_train_save_xgb_model() has not
            been called yet (self.train_model is still None).
        """
        if self.train_model is None:
            raise RuntimeError(
                'train_model is not set; call grain_search_train_save_xgb_model() first')

    def grain_search_train_save_xgb_model(self):
        """Grid-search an XGBClassifier and persist the best estimator.

        Searches n_estimators / max_depth / learning_rate with 5-fold
        stratified CV scored by ROC-AUC, using balanced sample weights.

        :return: the best fitted XGBClassifier (also stored in self.train_model)
        """
        x = self.x_data
        y = self.y_data
        # Stage one: settle the basic structure (tree count/depth, step size).
        param_grid = {
            'n_estimators': list(range(295, 301)),
            'max_depth': [1, 2, 3],
            'learning_rate': np.arange(0.2, 0.31, 0.01),
        }

        model = XGBClassifier(
            eval_metric='auc',
            tree_method='hist',  # histogram algorithm speeds up training
        )

        skf = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

        # Compensate class imbalance with per-sample weights instead of resampling.
        cls_weight = class_weight.compute_sample_weight('balanced', y)

        # The built-in 'roc_auc' scorer uses predicted probabilities; it replaces
        # make_scorer(roc_auc_score, needs_proba=True), whose needs_proba argument
        # was deprecated in scikit-learn 1.4 and removed in 1.6.
        grid_search = GridSearchCV(model, param_grid, scoring='roc_auc', cv=skf, n_jobs=-1)
        grid_search.fit(x, y, sample_weight=cls_weight)

        self.logfile.info(f'best_params: {grid_search.best_params_}')
        print(f'best_params: {grid_search.best_params_}')
        self.logfile.info(f'best_score(auc cv): {grid_search.best_score_}')
        print(f'best_score(auc cv): {grid_search.best_score_}')
        self.train_model = grid_search.best_estimator_
        joblib.dump(self.train_model, '../output/model/xgboost_model_jl.pkl')

        return self.train_model

    def shap_graph(self):
        """Run the SHAP analysis and save all figures to the local output dir.

        :raises RuntimeError: if no trained model is available.
        :return: None
        """
        self._require_trained_model()
        explainer = shap.Explainer(self.train_model)
        x = self.x_data
        shap_values = explainer.shap_values(x)

        self._plot_shap_bar(shap_values, x)
        self._plot_shap_beeswarm(shap_values, x)
        self._plot_shap_dependence(shap_values, x)

    def _plot_shap_bar(self, shap_values, x):
        """1. Bar chart: features ranked by mean absolute SHAP value."""
        plt.figure(figsize=(10, 6))
        shap.summary_plot(shap_values, x, plot_type='bar', show=False)
        ax = plt.gca()
        ax.set_title('SHAP特征重要性排序柱状图')
        ax.set_xlabel('平均绝对SHAP值')
        ax.set_ylabel('特征名称')
        plt.tight_layout()
        plt.savefig('../output/figure/SHAP特征重要性排序柱状图.png', bbox_inches='tight')
        plt.show()

    def _plot_shap_beeswarm(self, shap_values, x):
        """2. Beeswarm scatter: per-feature distribution of SHAP values."""
        plt.figure(figsize=(10, 6))
        shap.summary_plot(shap_values, x, show=False)
        ax = plt.gca()
        ax.set_title('SHAP特征影响散点图')
        ax.set_xlabel('模型输出值变化量')
        ax.set_ylabel('特征名称')
        # Relabel the colourbar when the summary plot produced one.
        if len(ax.collections) > 0 and ax.collections[0].colorbar:
            ax.collections[0].colorbar.set_label("特征值")
        plt.tight_layout()
        plt.savefig("../output/figure/SHAP特征影响散点图.png", bbox_inches='tight')
        plt.show()

    def _plot_shap_dependence(self, shap_values, x):
        """3. Dependence plots: one figure per feature vs its SHAP value."""
        for feature in x.columns:
            plt.figure(figsize=(8, 6))
            shap.dependence_plot(feature, shap_values, x, interaction_index=None, show=False)
            ax = plt.gca()
            ax.set_title(f"{feature}的SHAP依赖图")
            ax.set_xlabel(f"特征值: {feature}")
            ax.set_ylabel("SHAP值（预测影响）")
            plt.tight_layout()
            plt.savefig(f"../output/figure/{feature}的SHAP依赖图.png", bbox_inches='tight')
            plt.close()

    def get_low_shap_features(self, n_remove=5):
        """Identify the N features with the smallest mean |SHAP| value.

        :param n_remove: number of low-importance features to return
        :raises RuntimeError: if no trained model is available.
        :return: list of feature names, least important first
        """
        self._require_trained_model()
        x = self.x_data

        # Compute SHAP values for the full training set.
        explainer = shap.Explainer(self.train_model)
        shap_values = explainer.shap_values(x)

        # Mean absolute SHAP value per feature is its global importance.
        mean_abs_shap = np.mean(np.abs(shap_values), axis=0)
        feature_importance = pd.DataFrame({
            'feature': x.columns,
            'mean_abs_shap': mean_abs_shap
        }).sort_values(by='mean_abs_shap', ascending=True)
        print(f'SHAP特征重要性feature_importance = {feature_importance}')
        low_features = feature_importance.head(n_remove)['feature'].tolist()
        self.logfile.info(f'要删除的低重要性特征: {low_features}')
        print(f'要删除的低重要性特征: {low_features}')

        return low_features

    def retrain_model_with_reduced_features(self, low_features):
        """Drop the given features and retrain with the tuned hyper-parameters.

        :param low_features: list of feature names to remove
        :raises RuntimeError: if no trained model is available.
        :return: the newly fitted reduced XGBClassifier
        """
        self._require_trained_model()
        x = self.x_data.drop(columns=low_features)
        y = self.y_data

        # Reuse the hyper-parameters of the grid-searched best estimator.
        best_params = self.train_model.get_params()
        new_model = XGBClassifier(**best_params)

        cls_weight = class_weight.compute_sample_weight('balanced', y)
        new_model.fit(x, y, sample_weight=cls_weight)

        # Persist the reduced model next to the full one.
        joblib.dump(new_model, '../output/model/xgboost_model_jl_reduced.pkl')
        self.logfile.info("精简模型保存成功！")
        print("精简模型保存成功！")

        self.reduced_model = new_model
        return new_model



