import os
import traceback
import zlib
from typing import Dict

import numpy as np
import pandas as pd
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score

from config.ssq_config import logger, SSQ_CONFIG
import trainer.model_train.model_utils as model_utils
from trainer.model_train.common_train import BaseModelTrainer

'''
核心结论：优化参数已生效！平均 AUC 从 0.5001 提升至 0.5023, 高价值标签数量增加, 模型稳定性依旧拉满(33/33 成功), 虽整体提升幅度不大, 但完全达到 “补充 XGB/LGB 预测盲区” 的目标, 实用价值显著。
'''

class RandomForestTrainer(BaseModelTrainer):
    """Random-forest trainer for SSQ (double-color-ball) label prediction.

    Mirrors the XGB/LGB trainer interface: accepts a shared feature matrix
    prepared by the main process, runs on CPU only, and supports continued
    training (warm start) from a previously saved version.
    """

    def __init__(self, label_col: str, data_dict: Dict, load_version: str = None, run_type: str = 'train'):
        """Initialize the trainer.

        Args:
            label_col: Name of the target label column to train on.
            data_dict: Pre-split data from the main process; expected keys are
                ``X_train``, ``X_val``, ``y_train``, ``y_val``, ``feature_cols``
                and ``threshold``.
            load_version: Historical version to resume from; ``None`` trains fresh.
            run_type: Run mode passed through to the base trainer ('train' by default).
        """
        super().__init__(label_col, data_dict, model_type="rf", load_version=load_version, run_type=run_type)
        self.label_col = label_col  # target label column name
        self.data_dict = data_dict  # initialization data handed over by the main process
        self.config_label_col = SSQ_CONFIG['label_cols']  # all label columns (same set as XGB)
        self.train_ratio = SSQ_CONFIG['train_config'].get('train_ratio', 0.8)  # training-set ratio

        # Data holders, populated by init_data()
        self.feat_df = None        # externally supplied shared feature matrix
        self.X_train = None        # training features
        self.X_val = None          # validation features
        self.y_train = None        # training labels
        self.y_val = None          # validation labels
        self.feature_cols = None   # effective feature column names
        self.label_threshold = None  # label threshold from the data pipeline

        self.rf_device = "cpu"  # RF is forced onto CPU
        # Continued-training bookkeeping: keep any existing training_info, tag model type
        self.train_process_info["training_info"] = self.train_process_info.get("training_info", {})
        self.train_process_info["model_type"] = "rf"  # used to validate compatibility on resume

    @staticmethod
    def _default_rf_params(random_state: int) -> dict:
        """Return the tuned default RF hyperparameters with the given seed.

        Single source of truth for both fresh training and back-filling keys
        missing from an older saved parameter set (previously duplicated).
        """
        return {
            "n_estimators": 300,
            "max_depth": 12,
            "min_samples_split": 8,
            "min_samples_leaf": 4,
            "class_weight": "balanced",
            "max_features": "sqrt",
            "bootstrap": True,
            "oob_score": True,
            "n_jobs": 1,
            "random_state": random_state,
            "verbose": 0,
        }

    def _load_model(self) -> bool:
        """Load a previously saved model for continued training (aligned with XGB).

        Returns:
            True when a compatible RF model was loaded, False otherwise
            (in which case a fresh model will be trained).
        """
        try:
            model_path = self.train_process_info.get("model_path", "")
            if not model_path or not os.path.exists(model_path):
                # Fall back to scanning the model directory for a sklearn pickle
                model_files = [f for f in os.listdir(self.model_process_dir) if f.endswith(".pkl") and f.startswith(f"rf_{self.label_col}")]
                if model_files:
                    model_path = os.path.join(self.model_process_dir, model_files[0])
                else:
                    logger.warning(f"[{self.label_col}]未找到RF模型文件, 将重新训练")
                    return False

            # Load the model object itself
            self.model = model_utils.load_model(model_path, model_type="sklearn")
            logger.info(f"[{self.label_col}]RF模型加载成功: {model_path}")

            # Resume check: warn when historical hyperparameters are missing
            if self.check_version_exists and "model_params" not in self.train_process_info:
                logger.warning(f"[{self.label_col}]历史训练信息中未找到模型参数, 将使用新参数续训")
            # The saved version must be an RF model
            if self.train_process_info.get("model_type") != "rf":
                raise ValueError(f"历史参数属于{self.train_process_info.get('model_type')}, 与RF不兼容")

            return True
        except Exception as e:
            logger.error(f"[{self.label_col}]加载RF模型失败: {str(e)}")
            self.model = None
            return False

    def init_data(self) -> bool:
        """Bind the shared data dict to instance fields and validate features.

        On resume, verifies that the current feature columns match the saved
        ones (count and names) and re-orders them to the historical order so
        the model sees features in a consistent layout.

        Returns:
            True on success, False on any failure (logged, not raised).
        """
        try:
            # Copy the pre-split data onto the instance
            self.X_train = self.data_dict["X_train"]
            self.X_val = self.data_dict["X_val"]
            self.y_train = self.data_dict["y_train"]
            self.y_val = self.data_dict["y_val"]
            self.feature_cols = self.data_dict["feature_cols"]
            self.label_threshold = self.data_dict["threshold"]  # kept for later prediction use

            # On resume: validate feature consistency and align to historical order
            if self.check_version_exists:
                # .get chain avoids a raw KeyError when feature_info was never saved;
                # an empty list then fails the count check below with a clear message
                saved_feat_cols = self.train_process_info.get("feature_info", {}).get("feature_columns", [])
                logger.info(f'saved_feat_cols:{saved_feat_cols}')
                logger.info(f'当前原始特征列:{self.feature_cols}')

                # Validate count and name-set consistency
                if len(saved_feat_cols) != len(self.feature_cols):
                    raise ValueError(f"__init_data(): 特征列不一致！历史{len(saved_feat_cols)}个, 当前{len(self.feature_cols)}个")

                if set(saved_feat_cols) != set(self.feature_cols):
                    missing_in_current = set(saved_feat_cols) - set(self.feature_cols)
                    missing_in_saved = set(self.feature_cols) - set(saved_feat_cols)
                    raise ValueError(
                        f"特征列名不一致！历史有但当前没有: {missing_in_current}\n"
                        f"当前有但历史没有: {missing_in_saved}"
                    )

                # Resume only: re-order the current columns to the historical order
                self.feature_cols = [col for col in saved_feat_cols if col in self.feature_cols]
                logger.info(f"续训场景 - 特征列顺序已对齐：按历史顺序排列，共{len(self.feature_cols)}个特征")
            else:
                # Fresh training: keep the incoming column order untouched
                logger.info(f"非续训场景 - 保留原始特征列：共{len(self.feature_cols)}个特征")

            logger.info(f'self.X_train.shape: {self.X_train.shape}')
            logger.info(f'self.X_val.shape: {self.X_val.shape}')
            logger.info(f'self.y_train.shape: {self.y_train.shape}')
            logger.info(f'self.y_val.shape: {self.y_val.shape}')
            logger.info(f'self.label_threshold: {self.label_threshold}')

            # Persist dataset statistics so a later resume can be audited
            self._save_feature_columns(self.feature_cols)
            train_pos_count = int(np.sum(self.y_train == 1))
            train_neg_count = int(np.sum(self.y_train == 0))
            data_stats = {
                "train_set": {"positive_count": train_pos_count, "negative_count": train_neg_count, "sample_count": len(self.y_train)},
                "val_set": {"positive_count": int(np.sum(self.y_val == 1)), "negative_count": int(np.sum(self.y_val == 0)), "sample_count": len(self.y_val)}
            }
            self._save_data_statistics(data_stats)

            return True
        except Exception as e:
            logger.error(f"[{self.label_col}]模型数据初始化失败: {str(e)}")
            return False

    def _check_label_distribution(self):
        """Log class balance of both splits, warning on single-class sets (XGB-aligned)."""
        # Training-set label check
        train_counts = np.bincount(self.y_train)
        if len(train_counts) < 2:
            logger.warning(f"[{self.label_col}]训练集标签仅含1个类别！可能导致模型失效")
        else:
            train_pos_ratio = train_counts[1] / len(self.y_train)
            logger.info(f"[{self.label_col}]训练集正样本占比: {train_pos_ratio:.2%}")

        # Validation-set label check
        val_counts = np.bincount(self.y_val)
        if len(val_counts) < 2:
            logger.warning(f"[{self.label_col}]验证集标签仅含1个类别！评估结果可能无效")
        else:
            val_pos_ratio = val_counts[1] / len(self.y_val)
            logger.info(f"[{self.label_col}]验证集正样本占比: {val_pos_ratio:.2%}")

    def find_optimal_threshold(self, y_true: np.ndarray, y_pred_proba: np.ndarray) -> tuple:
        """Scan thresholds in [0.1, 0.9) and pick the one maximizing binary F1.

        Args:
            y_true: Ground-truth binary labels.
            y_pred_proba: Positive-class probabilities (1-D).

        Returns:
            Tuple of (best_threshold, best_f1_score).
        """
        thresholds = np.arange(0.1, 0.9, 0.01)
        # zero_division=0 matches the default score of 0 but without warning spam
        # at thresholds where no positive predictions are made
        f1_scores = [
            f1_score(y_true, np.where(y_pred_proba >= thresh, 1, 0), average="binary", zero_division=0)
            for thresh in thresholds
        ]

        best_idx = np.argmax(f1_scores)
        return thresholds[best_idx], f1_scores[best_idx]

    def evaluate_model(self, y_true: np.ndarray, y_pred: np.ndarray, y_pred_proba: np.ndarray) -> dict:
        """Compute AUC/F1/precision/recall (same metric set as XGB).

        AUC falls back to 0.5 when the validation labels contain one class.
        Returns an empty dict on failure (callers must not assume keys exist).
        """
        try:
            auc = roc_auc_score(y_true, y_pred_proba[:, 1]) if len(np.unique(y_true)) > 1 else 0.5
            return {
                "auc": round(auc, 4),
                "f1": round(f1_score(y_true, y_pred, average="binary", zero_division=0), 4),
                "precision": round(precision_score(y_true, y_pred, average="binary", zero_division=0), 4),
                "recall": round(recall_score(y_true, y_pred, average="binary", zero_division=0), 4)
            }
        except Exception as e:
            logger.error(f"[{self.label_col}]模型评估失败: {str(e)}")
            return {}

    def train(self) -> dict:
        """Training entry point: supports resume and incremental state saving.

        Returns:
            Dict with "status" ("success"/"fail"), "label_col", and on success
            "metrics", "device", "n_estimators" and "version".
        """
        logger.info(f"开始训练[RandomForest] - 目标标签: {self.label_col}, 强制使用CPU")
        self.train_process_info["status"] = "running"
        self.train_process_info["device"] = self.rf_device
        self._save_train_process_info()  # persist the initial training state

        try:
            if not self.init_data():
                self.train_process_info["status"] = "fail"
                self.train_process_info["error"] = "数据初始化失败"
                self._save_train_process_info()
                return {"status": "fail", "label_col": self.label_col, "error": "数据初始化失败"}

            pos_count = len(self.y_train[self.y_train == 1])
            neg_count = len(self.y_train[self.y_train == 0])
            logger.info(f"[{self.label_col}]类别分布: 正样本数={pos_count}, 负样本数={neg_count}")

            # Per-label seed. BUGFIX: hash() is randomized per process
            # (PYTHONHASHSEED), so the seed was not reproducible across runs;
            # crc32 is deterministic for the same label name.
            label_seed = SSQ_CONFIG.get("random_seed", 42) + zlib.crc32(self.label_col.encode("utf-8")) % 100
            if not self.check_version_exists or "model_params" not in self.train_process_info:
                # Fresh training: use the tuned defaults
                rf_params = self._default_rf_params(label_seed)
                logger.info(f"[{self.label_col}]使用新训练参数, 专属seed={label_seed}")
            else:
                # Resume: reuse historical params, back-filling any keys
                # missing from older versions from the current defaults
                rf_params = self.train_process_info["model_params"]
                for key, val in self._default_rf_params(label_seed).items():
                    if key not in rf_params:
                        rf_params[key] = val
                label_seed = rf_params["random_state"]
                logger.info(f"[{self.label_col}]复用历史RF参数, seed={label_seed}, 续训模式开启")

            # Persist params immediately so an interruption does not lose them
            self.train_process_info["model_params"] = rf_params
            self._save_train_process_info()

            # Resolve the feature columns actually used for training
            feature_cols = self.feature_cols if hasattr(self, 'feature_cols') and self.feature_cols else []
            if not feature_cols and hasattr(self.X_train, 'columns'):
                feature_cols = list(self.X_train.columns)
            logger.info(f"[{self.label_col}]训练使用的特征列数: {len(feature_cols)}")

            # Build or extend the model (resume uses warm start)
            logger.info(f"[{self.label_col}]开始随机森林训练(设备: {self.rf_device}, 核心参数: {rf_params})")
            if self.model is None:
                self.model = RandomForestClassifier(**rf_params)
                logger.info(f"[{self.label_col}]初始化新RF模型")
            else:
                logger.info(f"[{self.label_col}]使用已有模型续训(基于历史{len(self.model.estimators_)}棵树)")
                # BUGFIX: warm_start only grows the forest when n_estimators
                # exceeds the current tree count; setting it to the same value
                # would fit no new trees at all. Grow by n_estimators new trees.
                new_total = len(self.model.estimators_) + rf_params["n_estimators"]
                self.model.set_params(warm_start=True, n_estimators=new_total)

            # Fit (existing trees are kept when warm-starting)
            self.model.fit(self.X_train, self.y_train)
            logger.info(f"[{self.label_col}]随机森林训练完成: 决策树总数={len(self.model.estimators_)}, 袋外分数={self.model.oob_score_:.4f}")

            # Persist intermediate training info for later resumes
            self.train_process_info["training_info"] = {
                "n_estimators": len(self.model.estimators_),
                "oob_score": self.model.oob_score_,
                "feature_count": len(feature_cols),
                "random_seed": label_seed
            }
            self._save_train_process_info()

            # Predict on validation data and search the optimal threshold
            logger.info(f"[{self.label_col}]开始计算预测概率与最优阈值...")
            y_pred_proba = self.model.predict_proba(self.X_val)
            threshold, best_f1 = self.find_optimal_threshold(self.y_val, y_pred_proba[:, 1])
            logger.info(f"[{self.label_col}]最优F1阈值: {threshold:.4f}(F1分数: {best_f1:.4f})")

            # Persist the chosen threshold
            self._save_threshold({
                "optimal_threshold": round(threshold, 4),
                "best_f1_score": round(best_f1, 4)
            })

            # Compute evaluation metrics
            y_pred = np.where(y_pred_proba[:, 1] >= threshold, 1, 0)
            metrics = self.evaluate_model(self.y_val, y_pred, y_pred_proba)
            self.train_process_info["evaluation_metrics"] = metrics
            # .get: evaluate_model may return {} on failure; don't let the log
            # line itself raise and mark an otherwise-successful run as failed
            logger.info(f"[{self.label_col}]评估结果计算完成: AUC={metrics.get('auc', 0.0):.4f}, F1={metrics.get('f1', 0.0):.4f}")

            # Optional feature-importance logging
            if SSQ_CONFIG.get("log_feature_importance", False):
                importance_dict = dict(zip(self.feature_cols, self.model.feature_importances_))
                top_feats = sorted(importance_dict.items(), key=lambda x: x[1], reverse=True)[:5]
                logger.info(f"[{self.label_col}]Top5重要特征: {top_feats}")

            # Save the model artifact
            model_name = f"rf_{self.label_col}"
            logger.info(f"[{self.label_col}]开始保存模型: {model_name}")
            # Hyperparameter metadata stored alongside the model (note:
            # "oob_score" here carries the achieved score, overriding the bool flag)
            rf_model_params = {
                **rf_params,
                "oob_score": self.model.oob_score_,
                "feature_count": len(feature_cols),
                "pos_count": pos_count,
                "neg_count": neg_count
            }
            save_path = model_utils.save_model(
                model=self.model,
                model_name=model_name,
                model_type="sklearn",
                feature_cols=feature_cols,
                model_params=rf_model_params,
                threshold=threshold,
                version=self.train_version,
            )
            if save_path:
                logger.info(f"[{self.label_col}]模型保存成功: {save_path}")
            else:
                logger.warning(f"[{self.label_col}]模型保存失败")

            # Persist the final training state
            self.train_process_info["model_path"] = save_path
            self.train_process_info["status"] = "success"
            self._save_train_process_info()

            logger.info(f"[RF-def train()]-[{self.label_col}]训练完成, 返回设备: {self.rf_device}")
            return {
                "status": "success",
                "label_col": self.label_col,
                "metrics": metrics,
                "device": self.rf_device,
                "n_estimators": len(self.model.estimators_),
                "version": self.train_version
            }

        except Exception as e:
            logger.error(f"[RF-def train()][{self.label_col}]训练失败: {str(e)}\n{traceback.format_exc()}")
            self.train_process_info["status"] = "fail"
            self.train_process_info["error"] = str(e)
            self.train_process_info["error_traceback"] = traceback.format_exc()  # keep the stack trace for debugging
            self._save_train_process_info()
            return {
                "status": "fail",
                "label_col": self.label_col,
                "error": str(e)
            }

    @staticmethod
    def print_training_summary(all_results: list):
        """Log and print a summary table of training results (XGB-aligned format).

        Args:
            all_results: List of result dicts as returned by ``train()``.
        """
        logger.info("="*100)
        logger.info("                      随机森林 训练结果汇总表格")
        logger.info("="*100)

        # Guard: an empty result list would make the DataFrame column
        # selection below raise KeyError
        if not all_results:
            logger.warning("无训练结果可汇总")
            return

        summary_data = []
        version = "unknown"
        for res in all_results:
            train_device = res.get("device", "cpu")
            metrics = res.get("metrics", {})
            version = res.get("version", "unknown")
            summary_data.append({
                "标签列": res["label_col"],
                "训练版本": version,
                "训练设备": train_device,
                "决策树数量": res.get("n_estimators", "N/A"),
                "AUC": metrics.get("auc", 0.0),
                "F1分数": metrics.get("f1", 0.0),
                "精确率": metrics.get("precision", 0.0),
                "召回率": metrics.get("recall", 0.0),
                "训练状态": res["status"]
            })

        summary_df = pd.DataFrame(summary_data)
        for col in ["AUC", "F1分数", "精确率", "召回率"]:
            summary_df[col] = summary_df[col].round(4)

        logger.info("\n" + summary_df.to_string(index=False, col_space=10))
        logger.info("-"*100)
        total = len(all_results)
        success_count = sum(1 for r in all_results if r["status"] == "success")
        success_rate = (success_count / total) * 100 if total > 0 else 0.0
        avg_auc = summary_df[summary_df["训练状态"] == "success"]["AUC"].mean() if success_count > 0 else 0.0
        avg_f1 = summary_df[summary_df["训练状态"] == "success"]["F1分数"].mean() if success_count > 0 else 0.0
        logger.info(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        logger.info(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        logger.info("="*100 + "\n")

        # Console echo (same format as XGB)
        print("="*100)
        print("                      随机森林 训练结果汇总表格")
        print("="*100)
        print(summary_df.to_string(index=False, col_space=8))
        print("-"*100)
        print(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        print(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        print("="*100 + "\n")


if __name__ == "__main__":
    # Demo entry point: train once from scratch, then resume from the saved version.
    try:
        # Synthetic data matching the schema the trainer expects
        sample_data = {
            "X_train": np.random.rand(1000, 50),
            "X_val": np.random.rand(200, 50),
            "y_train": np.random.randint(0, 2, 1000),
            "y_val": np.random.randint(0, 2, 200),
            "feature_cols": [f"feat_{i}" for i in range(50)],
            "threshold": 0.5
        }

        # First-time training
        logger.info("====== 首次训练 ======")
        trainer = RandomForestTrainer(label_col="r1_next", data_dict=sample_data)
        result = trainer.train()
        # BUGFIX: a failed run returns no "version" key; indexing it directly
        # raised KeyError and aborted the demo before any summary was printed
        version = result.get("version")

        if result.get("status") == "success" and version:
            # Continued training: reload the version that was just saved
            logger.info("\n====== 续训 ======")
            retrainer = RandomForestTrainer(label_col="r1_next", data_dict=sample_data, load_version=version)
            retrain_result = retrainer.train()

            # Print the combined summary
            RandomForestTrainer.print_training_summary([result, retrain_result])
        else:
            # First run failed: still summarize the single result
            RandomForestTrainer.print_training_summary([result])
    except Exception as e:
        logger.critical(f"示例运行失败: {str(e)}\n{traceback.format_exc()}")