import xgboost as xgb
import numpy as np
import traceback
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
from config.ssq_config import logger, SSQ_CONFIG
import trainer.model_train.model_utils as model_utils
from trainer.model_train.common_train import BaseModelTrainer
import pandas as pd
import os
from typing import Dict, Any


class XGBModelTrainer(BaseModelTrainer):
    """XGBoost binary-classification trainer with resume-training support.

    Extends ``BaseModelTrainer`` with XGB-specific model loading, data
    initialization, training (optionally continuing from a previously
    saved booster), optimal-threshold search and metric evaluation.
    """

    def __init__(self, label_col: str, data_dict: Dict, load_version: str = None, run_type: str = 'train'):
        """Initialize the trainer.

        Args:
            label_col: Name of the target label column.
            data_dict: Dict with keys ``X_train``/``X_val``/``y_train``/
                ``y_val``/``feature_cols``/``threshold``.
            load_version: Historical version to resume from; ``None`` for a
                fresh training run.
            run_type: Run mode, e.g. ``'train'``.
        """
        super().__init__(label_col, data_dict, model_type="xgb", load_version=load_version, run_type=run_type)
        # Raw data holders; populated by _init_data().
        self.run_type = run_type
        self.X_train = None
        self.X_val = None
        self.y_train = None
        self.y_val = None
        self.feature_cols = None
        self.label_threshold = None
        # Device selection driven by config; XGBoost accepts "cuda"/"cpu".
        self.xgb_device = "cuda" if SSQ_CONFIG.get("use_gpu", False) else "cpu"
        # Ensure the intermediate-training-state bucket always exists, so
        # resume logic can .get() from it safely.
        self.train_process_info["training_info"] = self.train_process_info.get("training_info", {})

    def _load_model(self) -> bool:
        """Load a previously saved booster for resume-training.

        Falls back to scanning ``self.model_process_dir`` for ``.model`` /
        ``.bin`` files when no recorded path exists.

        Returns:
            True when a model was loaded, False when training should start
            from scratch (also on any load error; ``self.model`` is reset).
        """
        try:
            model_path = self.train_process_info.get("model_path", "")
            if not model_path or not os.path.exists(model_path):
                # Sort for a deterministic pick when several model files exist
                # (os.listdir order is filesystem-dependent).
                model_files = sorted(
                    f for f in os.listdir(self.model_process_dir) if f.endswith((".model", ".bin"))
                )
                if model_files:
                    model_path = os.path.join(self.model_process_dir, model_files[0])
                else:
                    logger.warning("未找到XGB模型文件, 将重新训练")
                    return False

            self.model = xgb.Booster()
            self.model.load_model(model_path)
            # Keep the loaded booster on the configured device.
            self.model.set_param({"device": self.xgb_device})

            # On resume, warn (but do not fail) when historical params are missing.
            if self.check_version_exists and "model_params" not in self.train_process_info:
                logger.warning("历史训练信息中未找到模型参数, 将使用新参数续训")

            logger.info(f"XGB模型加载成功: {model_path}")
            return True
        except Exception as e:
            logger.error(f"加载XGB模型失败: {str(e)}")
            self.model = None
            return False

    def _init_data(self) -> bool:
        """Unpack ``self.data_dict``, validate/align features, save statistics.

        On resume-training the current feature columns are checked against the
        saved ones (count and names) and reordered to the historical order; on
        a fresh run the original column order is kept untouched.

        Returns:
            True on success, False on any error (logged with traceback).
        """
        try:
            self.X_train = self.data_dict["X_train"]
            self.X_val = self.data_dict["X_val"]
            self.y_train = self.data_dict["y_train"]
            self.y_val = self.data_dict["y_val"]
            self.feature_cols = self.data_dict["feature_cols"]
            self.label_threshold = self.data_dict["threshold"]

            if self.check_version_exists:
                # Resume: enforce feature consistency with the saved model.
                saved_feat_cols = self.train_process_info["feature_info"].get("feature_columns", [])
                logger.info(f'saved_feat_cols:{saved_feat_cols}')
                logger.info(f'当前原始特征列:{self.feature_cols}')

                if len(saved_feat_cols) != len(self.feature_cols):
                    raise ValueError(f"__init_data(): 特征列不一致！历史{len(saved_feat_cols)}个, 当前{len(self.feature_cols)}个")

                if set(saved_feat_cols) != set(self.feature_cols):
                    missing_in_current = set(saved_feat_cols) - set(self.feature_cols)
                    missing_in_saved = set(self.feature_cols) - set(saved_feat_cols)
                    raise ValueError(
                        f"特征列名不一致！历史有但当前没有: {missing_in_current}\n"
                        f"当前有但历史没有: {missing_in_saved}"
                    )

                # Re-order to the historical column order so the DMatrix layout
                # matches the saved booster.
                self.feature_cols = [col for col in saved_feat_cols if col in self.feature_cols]
                logger.info(f"续训场景 - 特征列顺序已对齐：按历史顺序排列，共{len(self.feature_cols)}个特征")
            else:
                # Fresh training: keep the original column order as-is.
                logger.info(f"非续训场景 - 保留原始特征列：共{len(self.feature_cols)}个特征")

            # Dataset statistics (class balance per split).
            train_pos_count = int(np.sum(self.y_train == 1))
            train_neg_count = int(np.sum(self.y_train == 0))
            val_pos_count = int(np.sum(self.y_val == 1))
            val_neg_count = int(np.sum(self.y_val == 0))
            data_stats = {
                "train_set": {
                    "sample_count": len(self.X_train),
                    "feature_count": len(self.feature_cols),
                    "positive_count": train_pos_count,
                    "negative_count": train_neg_count,
                    "positive_ratio": round(train_pos_count / len(self.y_train), 4) if len(self.y_train) else 0.0
                },
                "val_set": {
                    "sample_count": len(self.X_val),
                    "feature_count": len(self.feature_cols),
                    "positive_count": val_pos_count,
                    "negative_count": val_neg_count,
                    "positive_ratio": round(val_pos_count / len(self.y_val), 4) if len(self.y_val) else 0.0
                }
            }
            self._save_data_statistics(data_stats)  # persist via base class
            self._save_feature_columns(self.feature_cols)  # persist via base class
            return True
        except Exception as e:
            logger.error(f"_init_data() 数据初始化失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def train(self) -> Dict[str, Any]:
        """Run a full XGB training pass (fresh or resumed).

        Steps: init data, binarize labels, build params (reusing saved ones on
        resume), train with early stopping, record iteration bookkeeping,
        sweep the decision threshold for best F1, evaluate, and save.

        Returns:
            Dict with ``status`` plus, on success, metrics/model path/version/
            device/iteration info; on failure, an ``error`` description.
        """
        logger.info(f"开始训练XGB - 目标标签: {self.label_col}, 设备: {self.xgb_device}")
        self.train_process_info["status"] = "running"
        self.train_process_info["device"] = self.xgb_device
        self._save_train_process_info()

        try:
            # 1. Data initialization.
            if not self._init_data():
                return {"status": "fail", "label_col": self.label_col, "error": "数据初始化失败"}

            # 2. Binarize labels against the configured threshold.
            self.y_train = np.where(self.y_train >= self.label_threshold, 1, 0)
            self.y_val = np.where(self.y_val >= self.label_threshold, 1, 0)

            # 3. Class weight for the imbalanced positive class.
            pos_count = int(np.sum(self.y_train == 1))
            neg_count = int(np.sum(self.y_train == 0))
            pos_weight = neg_count / pos_count if pos_count > 0 else 1.0

            # 4. Hyperparameters: reuse historical params on resume, otherwise
            #    build the full parameter set.
            if self.check_version_exists and "model_params" in self.train_process_info:
                xgb_params = self.train_process_info["model_params"]
                # .get() instead of [] — older runs may not have saved a seed.
                label_seed = xgb_params.get("seed")
                # Keep the device consistent with the current run.
                xgb_params["device"] = self.xgb_device
                logger.info(f"复用历史XGB参数, seed={label_seed}, 续训模式开启")
            else:
                # NOTE(review): hash() is salted per process unless
                # PYTHONHASHSEED is fixed, so this seed is not reproducible
                # across runs — confirm whether that is intended.
                label_seed = SSQ_CONFIG.get("random_seed", 42) + hash(self.label_col) % 1000
                xgb_params = {
                    "objective": "binary:logistic",
                    "eval_metric": ["auc", "logloss"],
                    "learning_rate": 0.07,
                    "max_depth": 8,
                    "scale_pos_weight": pos_weight,
                    "subsample": 0.7,
                    "colsample_bytree": 0.75,
                    "colsample_bylevel": 0.8,
                    "seed": label_seed,
                    "verbosity": 0,
                    "tree_method": "hist",
                    "device": self.xgb_device,
                    "max_bin": 256,
                    "min_child_weight": 1,
                    "gamma": 0.1,
                    "reg_alpha": 0.1,
                    "reg_lambda": 1.0,
                    "nthread": 1,
                }

            # Persist params immediately so a crash mid-training does not lose
            # them (they are required for resume).
            self.train_process_info["model_params"] = xgb_params
            self._save_train_process_info()

            # 5. Build DMatrix datasets.
            dtrain = xgb.DMatrix(self.X_train, label=self.y_train)
            dval = xgb.DMatrix(self.X_val, label=self.y_val)

            # 6. Train. Bug fix: pin early stopping to "auc" — without
            #    metric_name it monitors the LAST eval metric (logloss here),
            #    and maximize=True would then maximize logloss.
            evals_result = {}
            early_stop = xgb.callback.EarlyStopping(rounds=30, metric_name="auc", maximize=True)

            if self.check_version_exists and self.model is not None:
                history_iter = self.train_process_info["training_info"].get("total_iterations", 0)
                logger.info(f"续训：历史已训练{history_iter}棵树, 将继续训练")

            self.model = xgb.train(
                params=xgb_params,
                dtrain=dtrain,
                num_boost_round=1000,
                evals=[(dval, "validation")],
                evals_result=evals_result,
                callbacks=[early_stop],
                xgb_model=self.model if self.check_version_exists else None  # resume from loaded booster
            )

            # Iteration bookkeeping — consolidated (previously computed twice,
            # once via Booster.attr('best_iteration') which returns a string).
            # best_iteration is 0-based and only set when early stopping fired;
            # num_boosted_rounds() is the 1-based count of trees actually built.
            best_iteration = getattr(self.model, "best_iteration", None)
            current_iter = self.model.num_boosted_rounds()
            best_iteration_1based = best_iteration + 1 if best_iteration is not None else None
            total_iterations = best_iteration_1based if best_iteration_1based is not None else current_iter

            self.train_process_info["training_info"] = {
                "total_iterations": total_iterations,        # 1-based effective iteration count
                "best_iteration": best_iteration,            # raw 0-based index (or None)
                "best_iteration_1based": best_iteration_1based,
                "current_iteration": current_iter,           # trees actually trained (1-based)
                "early_stop_rounds": 30,
                "num_boost_round": 1000,
            }
            self.train_process_info["evals_result"] = evals_result
            self._save_train_process_info()

            # 7. Sweep decision threshold for the best F1 on the validation set.
            y_pred_proba = self.model.predict(dval)
            thresholds = np.arange(0.1, 0.9, 0.01)
            f1_scores = [f1_score(self.y_val, (y_pred_proba >= thresh).astype(int)) for thresh in thresholds]
            best_idx = int(np.argmax(f1_scores))
            threshold = thresholds[best_idx]
            best_f1 = f1_scores[best_idx]
            self._save_threshold({
                "optimal_threshold": round(threshold, 4),
                "best_f1_score": round(best_f1, 4)
            })

            # 8. Evaluation metrics at the chosen threshold.
            y_pred = (y_pred_proba >= threshold).astype(int)
            metrics = {
                "auc": round(roc_auc_score(self.y_val, y_pred_proba), 4),
                "f1": round(f1_score(self.y_val, y_pred), 4),
                "precision": round(precision_score(self.y_val, y_pred), 4),
                "recall": round(recall_score(self.y_val, y_pred), 4)
            }
            self.train_process_info["evaluation_metrics"] = metrics

            # 9. Save the model and the complete training record.
            model_name = f"xgb_{self.label_col}"
            save_path = model_utils.save_model(
                model=self.model,
                model_name=model_name,
                model_type="xgb",
                feature_cols=self.feature_cols,
                model_params=xgb_params,
                threshold=threshold,
                version=self.train_version,
            )
            self.train_process_info["model_path"] = save_path
            self.train_process_info["status"] = "success"
            self._save_train_process_info()

            logger.info(f"[XGB-def train()]-[{self.label_col}]训练完成, 返回设备: {self.xgb_device}")
            return {
                "status": "success",
                "label_col": self.label_col,
                "metrics": metrics,
                "model_path": save_path,
                "version": self.train_version,
                "device": self.xgb_device,
                "total_iterations": total_iterations,
                "best_iteration": best_iteration
            }

        except Exception as e:
            # Record the failure so the persisted state reflects what happened.
            self.train_process_info["status"] = "fail"
            self.train_process_info["error"] = str(e)
            self.train_process_info["error_traceback"] = traceback.format_exc()
            logger.error(f'[XGB-def train()]-模型训练失败, 请检查: {str(e)}\n{traceback.format_exc()}')
            self._save_train_process_info()
            return {"status": "fail", "label_col": self.label_col, "error": f"{str(e)}\n{traceback.format_exc()}"}

    @staticmethod
    def print_training_summary(all_results: list):
        """Log and print a summary table for a batch of training results.

        Each item of ``all_results`` is a dict like::

            {
                'label_col': 'r32_next',
                'metrics': {'auc': 0.547, 'f1': 0.3195, 'precision': 0.1942,
                            'recall': 0.9, 'sample_count': 496,
                            'positive_sample_count': 90},
                'status': 'success',
                'version': 'vXGB_2025102917_train',
                'device': 'cpu'
            }

        Args:
            all_results: List of per-label result dicts as returned by train().
        """
        logger.info("="*100)
        logger.info("                      XGBoost 训练结果汇总表格")
        logger.info("="*100)
        logger.info(f'all_results: {all_results}')

        summary_data = []
        for res in all_results:
            metrics = res.get("metrics", {})
            summary_data.append({
                "标签列": res["label_col"],
                "训练版本": res.get("version", "unknown"),
                "训练设备": res.get("device", "cpu"),
                "累计总树数": res.get("total_iterations", "N/A"),
                "最佳树数量": res.get("best_iteration", "N/A"),
                "AUC": metrics.get("auc", 0.0),
                "F1分数": metrics.get("f1", 0.0),
                "精确率": metrics.get("precision", 0.0),
                "召回率": metrics.get("recall", 0.0),
                "训练状态": res["status"]
            })

        # Robustness fix: an empty DataFrame has no columns, so the rounding
        # loop below would raise KeyError.
        if not summary_data:
            logger.info("无训练结果可汇总")
            return

        summary_df = pd.DataFrame(summary_data)
        numeric_cols = ["AUC", "F1分数", "精确率", "召回率"]
        for col in numeric_cols:
            summary_df[col] = summary_df[col].round(4)

        logger.info("\n" + summary_df.to_string(index=False, col_space=10))

        # Aggregate statistics over successful runs.
        total = len(all_results)
        success_count = sum(1 for res in all_results if res["status"] == "success")
        success_rate = (success_count / total) * 100 if total > 0 else 0.0
        avg_auc = summary_df[summary_df["训练状态"] == "success"]["AUC"].mean() if success_count > 0 else 0.0
        avg_f1 = summary_df[summary_df["训练状态"] == "success"]["F1分数"].mean() if success_count > 0 else 0.0

        logger.info("-"*100)
        logger.info(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        logger.info(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        logger.info("="*100 + "\n")

        # Mirror the summary on stdout.
        print("="*100)
        print("                      XGBoost 训练结果汇总表格")
        print("="*100)
        print(summary_df.to_string(index=False, col_space=8))
        print("-"*100)
        print(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        print(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        print("="*100 + "\n")


if __name__ == "__main__":
    # Smoke-test entry point: a fresh training pass followed by a
    # resume-training pass on the same synthetic data.
    try:
        n_feats = 50
        demo_data = {
            "X_train": np.random.rand(1000, n_feats),
            "X_val": np.random.rand(200, n_feats),
            "y_train": np.random.randint(0, 2, 1000),
            "y_val": np.random.randint(0, 2, 200),
            "feature_cols": [f"feat_{i}" for i in range(n_feats)],
            "threshold": 0.5,
        }

        # First pass: train from scratch.
        logger.info("====== 首次训练 ======")
        first_trainer = XGBModelTrainer(label_col="r1_next", data_dict=demo_data)
        # first_trainer.xgb_device = "cuda"  # uncomment to force GPU training
        first_result = first_trainer.train()

        # Second pass: resume from the version produced above.
        logger.info("\n====== 续训测试 ======")
        second_trainer = XGBModelTrainer(
            label_col="r1_next",
            data_dict=demo_data,
            load_version=first_result["version"],
        )
        second_result = second_trainer.train()

        XGBModelTrainer.print_training_summary([first_result, second_result])
    except Exception as e:
        logger.critical(f"训练示例执行失败: {str(e)}\n{traceback.format_exc()}")