# Standard library
import datetime
import json

# Third-party
import joblib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
import xgboost as xgb
from imblearn.over_sampling import SMOTE
from sklearn.metrics import classification_report, confusion_matrix, \
                            roc_auc_score, roc_curve, accuracy_score, f1_score, \
                            precision_score, recall_score, make_scorer
from sklearn.model_selection import train_test_split, GridSearchCV, StratifiedKFold

# Local
from features import FeatureEngineering
from utils.log import Logger


class Train(object):
    """End-to-end XGBoost training pipeline.

    Wires together feature engineering, cross-validated grid search,
    hold-out evaluation, result visualization and model persistence.
    All data/log/figure/model paths are relative to the script location.
    """

    def __init__(self, data_path='../data/train.csv'):
        """Set up run logging and the feature-engineering helper.

        Args:
            data_path: Path to the training CSV consumed by FeatureEngineering.
        """
        # Timestamped log file name so each training run gets its own log.
        train_log_name = "train_" + datetime.datetime.now().strftime("%Y%m%d%H%M%S")
        self.logger = Logger('../log', train_log_name).get_logger()

        # Feature-engineering helper; owns preprocessing and feature selection.
        self.fe = FeatureEngineering(data_path)
        self.model = None              # best estimator found by the grid search
        self.selected_features = None  # feature names kept after RFECV
        self.high_corr_pairs = None    # (feat_a, feat_b, corr) tuples from features_corr()

        self.logger.info("模型训练类初始化完成".center(50, '='))

    def prepare_data(self):
        """Run preprocessing and feature selection; return (X, y) for training.

        Returns:
            tuple: (x_final, y) where x_final is a DataFrame restricted to the
            RFECV-selected features (with 'JobLevel' dropped due to high
            correlation) and y is the target series.
        """
        self.logger.info("准备训练数据".center(50, '='))

        # Delegate the heavy lifting to the feature-engineering helper.
        x, y = self.fe.preprocess()            # data preprocessing
        self.fe.feature_importance_rf()        # random-forest feature importance
        self.fe.select_features_rfe()          # RFECV feature selection
        self.high_corr_pairs = self.fe.features_corr()  # high-correlation pairs after RFECV

        # Features chosen by RFECV.
        self.selected_features = self.fe.selected_features
        self.logger.info(f"RFECV 选择的特征数量: {len(self.selected_features)}")
        # Fixed: stray ')' previously embedded in the logged message.
        self.logger.info(f"特征列表: {self.selected_features}")

        # Log the highly correlated feature pairs, if any.
        if self.high_corr_pairs:
            self.logger.info(f"发现 {len(self.high_corr_pairs)} 对高相关性特征:")
            for row, col, corr_value in self.high_corr_pairs:
                self.logger.info(f"  {row} & {col}: {corr_value:.3f}")
        else:
            self.logger.info("无 高相关性 特征对")

        # 'JobLevel' was identified as highly correlated with another feature
        # during analysis, so it is dropped from the final set.
        if 'JobLevel' in self.selected_features:
            self.selected_features.remove('JobLevel')
            self.logger.info("分析 高相关性特征 删除'JobLevel'特征")
            self.logger.info(f"删除后特征数量: {len(self.selected_features)}")

        # Restrict the design matrix to the final feature set.
        x_final = self.fe.x[self.selected_features]

        self.logger.info("训练数据准备完成".center(50, '='))
        return x_final, self.fe.y

    def train_model(self, x, y):
        """Train an XGBoost classifier via 5-fold cross-validated grid search.

        Args:
            x: Feature DataFrame.
            y: Binary target series (0 = negative class, 1 = positive class).

        Returns:
            The best fitted estimator found by the grid search (also stored
            on self.model). The hold-out split and its predictions are kept
            on self for later use by visualize_results().
        """
        self.logger.info("开始训练 交叉验证网格搜索 XGBoost模型".center(50, '='))

        # Stratified hold-out split so the class ratio is preserved.
        x_train, x_test, y_train, y_test = train_test_split(
            x, y, test_size=0.2, random_state=42, stratify=y
        )

        self.logger.info(f"训练集形状: {x_train.shape}")
        self.logger.info(f"测试集形状: {x_test.shape}")

        # Hyper-parameter search space.
        # NOTE(review): including 'random_state' in the grid tunes the seed
        # itself and overrides the seed fixed on the estimator below —
        # consider removing it from the grid for reproducibility.
        param_grid = {
            'n_estimators': [25, 50, 75, 100, 150, 200],
            'max_depth': [3, 4, 5, 6, 7],
            'learning_rate': [0.01, 0.05, 0.1, 0.15, 0.2, 0.5],
            'reg_alpha': [0, 0.1, 0.5, 1.0],  # L1 regularization
            'reg_lambda': [0.5, 1.0, 1.5],    # L2 regularization
            'gamma': [0, 0.1, 0.2, 0.5],      # min split loss; larger = more conservative, less overfitting
            'random_state': [11, 42, 123, 456, 789, 2025]
        }

        # Weight the positive class by the negative/positive count ratio to
        # counter class imbalance.
        pos_weight = (y_train.value_counts()[0] / y_train.value_counts()[1])
        xgb_model = xgb.XGBClassifier(random_state=42,
                                      n_jobs=-1,
                                      scale_pos_weight=pos_weight)

        # 5-fold stratified cross-validation.
        cv = StratifiedKFold(n_splits=5, shuffle=True, random_state=42)

        # Exhaustive grid search optimizing F1 (appropriate for the
        # imbalanced target).
        self.logger.info("开始5折交叉验证网格搜索")
        grid_search = GridSearchCV(
            estimator=xgb_model,
            param_grid=param_grid,
            scoring='f1',
            cv=cv,
            n_jobs=-1,
            verbose=10
        )

        grid_search.fit(x_train, y_train)

        # Keep the refitted best estimator.
        self.model = grid_search.best_estimator_

        self.logger.info(f"网格搜索完成")
        self.logger.info(f"最佳参数: {grid_search.best_params_}")
        self.logger.info(f"最佳交叉验证得分: {grid_search.best_score_:.4f}")

        # Hold-out evaluation.
        y_pred = self.model.predict(x_test)
        y_pred_proba = self.model.predict_proba(x_test)[:, 1]  # positive-class probability

        self.logger.info("模型在测试集上的评估结果:")
        self.logger.info(f"准确率: {accuracy_score(y_test, y_pred):.4f}")
        self.logger.info(f"F1-score：{f1_score(y_test, y_pred)}")
        self.logger.info(f"\n分类报告:\n{classification_report(y_test, y_pred)}")
        self.logger.info(f"AUC分数: {roc_auc_score(y_test, y_pred_proba):.4f}")

        # Stash the split and predictions for visualize_results().
        self.x_train, self.x_test = x_train, x_test
        self.y_train, self.y_test = y_train, y_test
        self.y_pred, self.y_pred_proba = y_pred, y_pred_proba

        self.logger.info("网格搜索训练完成".center(50, '='))

        return self.model

    def visualize_results(self):
        """Plot and save the confusion matrix, ROC curve and feature importances.

        Requires train_model() to have run first (reads self.model,
        self.y_test, self.y_pred, self.y_pred_proba). Figures are written
        under ../data/fig/ and also shown interactively.
        """
        self.logger.info("开始可视化结果".center(50, '='))

        # Confusion matrix heatmap.
        plt.figure(figsize=(8, 6))
        cm = confusion_matrix(self.y_test, self.y_pred)
        sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
        plt.title('混淆矩阵')
        plt.xlabel('预测标签')
        plt.ylabel('真实标签')
        plt.tight_layout()
        plt.savefig('../data/fig/confusion_matrix.png', dpi=300, bbox_inches='tight')
        plt.show()

        # ROC curve with AUC in the legend.
        plt.figure(figsize=(8, 6))
        fpr, tpr, _ = roc_curve(self.y_test, self.y_pred_proba)
        auc_score = roc_auc_score(self.y_test, self.y_pred_proba)
        plt.plot(fpr, tpr, label=f'ROC Curve (AUC = {auc_score:.4f})')
        plt.plot([0, 1], [0, 1], 'k--', label='Random Classifier')
        plt.xlabel('假正率 (False Positive Rate)')
        plt.ylabel('真正率 (True Positive Rate)')
        plt.title('ROC曲线')
        plt.legend()
        plt.grid(True)
        plt.tight_layout()
        plt.savefig('../data/fig/roc_curve.png', dpi=300, bbox_inches='tight')
        plt.show()

        # Feature importances of the best model, sorted descending.
        feature_importance = pd.Series(self.model.feature_importances_, index=self.selected_features).sort_values(
            ascending=False)
        plt.figure(figsize=(10, 8))
        sns.barplot(x=feature_importance.values, y=feature_importance.index, palette='viridis')
        plt.title('XGBoost特征重要性')
        plt.xlabel('重要性')
        plt.tight_layout()
        plt.savefig('../data/fig/xgb_feature_importance.png', dpi=300, bbox_inches='tight')
        plt.show()

        self.logger.info("结果可视化完成".center(50, '='))

    def save_model(self):
        """Persist the trained model and the selected-feature list.

        Writes the model (joblib) and the feature names (JSON) under
        ../model/ so inference code can reload both.
        """
        self.logger.info("开始保存模型".center(50, '='))

        # Persist the fitted estimator.
        model_path = '../model/xgb_model.pkl'
        joblib.dump(self.model, model_path)
        self.logger.info(f"模型已保存至: {model_path}")

        # Persist the feature list so inference uses the same columns in order.
        features_path = '../model/selected_features.json'
        with open(features_path, 'w') as f:
            json.dump(self.selected_features, f)
        self.logger.info(f"特征列表已保存至: {features_path}")

        self.logger.info("模型保存完成".center(50, '='))

    def train(self):
        """Run the full pipeline: prepare data, train, visualize, save.

        Returns:
            The best fitted estimator from train_model().
        """
        self.logger.info("开始训练XGBoost模型".center(50, '='))

        # Prepare data
        x, y = self.prepare_data()
        # Train model
        model = self.train_model(x, y)
        # Visualize results
        self.visualize_results()
        # Save model
        self.save_model()

        self.logger.info("完整训练流程完成".center(50, '='))

        return model


if __name__ == "__main__":
    # Script entry point: run the full training pipeline end-to-end.
    model = Train('../data/train.csv').train()
