import numpy as np
import pandas as pd
import traceback
import lightgbm as lgb
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
from config.ssq_config import logger, SSQ_CONFIG
import trainer.model_train.model_utils as model_utils
from trainer.model_train.common_train import BaseModelTrainer
from typing import Dict, Any
import os
import time


class LGBModelTrainer(BaseModelTrainer):
    """LightGBM model trainer (continued training + parameter persistence, structure aligned with the XGB trainer)."""

    def __init__(self, label_col: str, data_dict: Dict, load_version: str = None, run_type: str = 'train'):
        """Wire up config values, the shared data dict and bookkeeping slots.

        Args:
            label_col: target label column this trainer is responsible for.
            data_dict: pre-split training data handed over by the parent process.
            load_version: historical version to continue training from (None = fresh run).
            run_type: run mode passed through to the base trainer (default 'train').
        """
        super().__init__(label_col, data_dict, model_type="lgb", load_version=load_version, run_type=run_type)
        self.train_ratio = SSQ_CONFIG['train_config'].get('train_ratio', 0.8)
        self.config_label_col = SSQ_CONFIG['label_cols']
        self.data_dict = data_dict  # initial data handed over by the parent process

        # Placeholders for the train/validation split and feature metadata,
        # filled in by _init_data().
        self.X_train = self.X_val = None
        self.y_train = self.y_val = None
        self.feature_cols = None
        self.label_threshold = None

        self.lgb_device = "cpu"  # forced to CPU execution
        self.model = None        # holds the Booster once loaded or trained

        # Intermediate training bookkeeping (layout kept identical to the XGB trainer)
        for section in ("training_info", "model_params", "evaluation_metrics"):
            self.train_process_info[section] = self.train_process_info.get(section, {})
        self.train_process_info["error"] = None
        self.train_process_info["error_traceback"] = None

    def _load_model(self) -> bool:
        """Load a previously saved LGB model for continued training (mirrors the XGB logic).

        Resolution order: the path recorded in train_process_info, else the first
        (lexicographically sorted) ``.bin``/``.txt`` file in the model directory.

        Returns:
            bool: True if a Booster was loaded successfully; False if no model
                  file exists or loading failed (caller then trains from scratch).
        """
        try:
            model_path = self.train_process_info.get("model_path", "")
            if not model_path or not os.path.exists(model_path):
                # Fall back to scanning the model directory. Sort the candidates:
                # os.listdir order is platform-dependent, so the original
                # "first file" pick was nondeterministic across runs/hosts.
                model_files = sorted(
                    f for f in os.listdir(self.model_process_dir) if f.endswith((".bin", ".txt"))
                )
                if not model_files:
                    logger.warning(f"[{self.label_col}]未找到LGB模型文件, 将重新训练")
                    return False
                model_path = os.path.join(self.model_process_dir, model_files[0])

            # Load the model and pin it to the configured device
            logger.info(f'model_path: {model_path}')
            self.model = lgb.Booster(model_file=model_path)
            self.model.reset_parameter({"device": self.lgb_device})
            logger.info(f"[{self.label_col}]LGB模型加载成功: {model_path}")

            # Continued-training sanity check: warn if historical params are missing
            if self.check_version_exists and "model_params" not in self.train_process_info:
                logger.warning(f"[{self.label_col}]历史训练信息中未找到模型参数, 将使用新参数续训")

            return True
        except Exception as e:
            # Best-effort: log and fall back to fresh training rather than crash
            logger.error(f"[{self.label_col}]加载LGB模型失败: {str(e)}\n{traceback.format_exc()}")
            self.model = None
            return False

    def _init_data(self) -> bool:
        """Initialize training data from self.data_dict (method name aligned with the XGB trainer).

        Steps: assign the pre-split data, binarize labels against the configured
        threshold, validate/align feature columns against the saved run when
        continuing training, then persist data statistics and feature columns.

        Returns:
            bool: True on success, False on any failure (logged with traceback).
        """
        try:
            start = time.time()
            logger.info(f"[{self.label_col}]_init_data开始 - {start:.3f}")
            # Step 1: assign data references handed over by the parent process
            step1 = time.time()
            self.X_train = self.data_dict["X_train"]
            self.X_val = self.data_dict["X_val"]
            self.y_train = self.data_dict["y_train"]
            self.y_val = self.data_dict["y_val"]
            self.feature_cols = self.data_dict["feature_cols"]
            self.label_threshold = self.data_dict["threshold"]
            logger.info(f"[{self.label_col}]步骤1: 数据赋值完成 - 耗时{time.time()-step1:.3f}s")
            
            # Step 2: binarize labels (same rule as the XGB trainer): 1 iff y >= threshold
            step2 = time.time()
            self.y_train = np.where(self.y_train >= self.label_threshold, 1, 0)
            self.y_val = np.where(self.y_val >= self.label_threshold, 1, 0)
            logger.info(f"[{self.label_col}]步骤2: 二值化完成 - 耗时{time.time()-step2:.3f}s")

            # Step 3: continued-training feature consistency check
            step3 = time.time()
            # When continuing training: validate feature consistency and re-align
            # to the historical column order (skipped for a fresh run).
            if self.check_version_exists:
                saved_feat_cols = self.train_process_info["feature_info"].get("feature_columns", [])
                logger.info(f'saved_feat_cols:{saved_feat_cols}')
                logger.info(f'当前原始特征列:{self.feature_cols}')

                # Validate column count and column-name set (original logic unchanged)
                if len(saved_feat_cols) != len(self.feature_cols):
                    raise ValueError(f"__init_data(): 特征列不一致！历史{len(saved_feat_cols)}个, 当前{len(self.feature_cols)}个")
                
                if set(saved_feat_cols) != set(self.feature_cols):
                    missing_in_current = set(saved_feat_cols) - set(self.feature_cols)
                    missing_in_saved = set(self.feature_cols) - set(saved_feat_cols)
                    raise ValueError(
                        f"特征列名不一致！历史有但当前没有: {missing_in_current}\n"
                        f"当前有但历史没有: {missing_in_saved}"
                    )

                # Continued training only: re-order the column list to the historical order.
                # NOTE(review): only the feature_cols *list* is re-ordered here; X_train /
                # X_val themselves are never re-indexed, so if they are positional arrays
                # (as in the __main__ example) the underlying data columns are NOT actually
                # re-aligned to the historical model — confirm upstream guarantees order.
                self.feature_cols = [col for col in saved_feat_cols if col in self.feature_cols]
                logger.info(f"续训场景 - 特征列顺序已对齐：按历史顺序排列，共{len(self.feature_cols)}个特征")
            else:
                # Fresh run (first training): keep the original feature columns untouched
                logger.info(f"非续训场景 - 保留原始特征列：共{len(self.feature_cols)}个特征")

            logger.info(f"[{self.label_col}]步骤3: 特征校验完成 - 耗时{time.time()-step3:.3f}s")

            # Step 4: data statistics, persisted alongside the feature column list
            step4 = time.time()
            train_pos_count = int(np.sum(self.y_train == 1))
            train_neg_count = int(np.sum(self.y_train == 0))
            val_pos_count = int(np.sum(self.y_val == 1))
            val_neg_count = int(np.sum(self.y_val == 0))
            data_stats = {
                "train_set": {
                    "sample_count": len(self.X_train),
                    "feature_count": len(self.feature_cols),
                    "positive_count": train_pos_count,
                    "negative_count": train_neg_count,
                    "positive_ratio": round(train_pos_count / len(self.y_train), 4) if len(self.y_train) else 0.0
                },
                "val_set": {
                    "sample_count": len(self.X_val),
                    "feature_count": len(self.feature_cols),
                    "positive_count": val_pos_count,
                    "negative_count": val_neg_count,
                    "positive_ratio": round(val_pos_count / len(self.y_val), 4) if len(self.y_val) else 0.0
                }
            }
            self._save_data_statistics(data_stats)
            self._save_feature_columns(self.feature_cols)
            logger.info(f"[{self.label_col}]步骤4: 统计完成 - 耗时{time.time()-step4:.3f}s")            
            logger.info(f'[{self.label_col}]数据初始化完成 - 训练集shape: {self.X_train.shape}, 验证集shape: {self.X_val.shape}')
            return True
        except Exception as e:
            logger.error(f"[{self.label_col}]数据初始化失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _check_label_distribution(self):
        """Log the positive-class ratio for the train and validation labels.

        Warns when a split contains only a single class, since that makes
        training (train set) or evaluation (val set) unreliable.
        """
        datasets = (
            (self.y_train,
             f"[{self.label_col}]训练集标签仅含1个类别！可能导致模型失效",
             "训练集正样本占比"),
            (self.y_val,
             f"[{self.label_col}]验证集标签仅含1个类别！评估结果可能无效",
             "验证集正样本占比"),
        )
        for labels, single_class_warning, ratio_label in datasets:
            class_counts = np.bincount(labels)
            if len(class_counts) < 2:
                logger.warning(single_class_warning)
            else:
                pos_ratio = class_counts[1] / len(labels)
                logger.info(f"[{self.label_col}]{ratio_label}: {pos_ratio:.2%}")

    def find_optimal_threshold(self, y_true: np.ndarray, y_pred_proba: np.ndarray) -> tuple:
        """寻找最优分类阈值(与XGB逻辑一致)"""
        thresholds = np.arange(0.1, 0.9, 0.01)
        f1_scores = []
        for thresh in thresholds:
            y_pred = np.where(y_pred_proba >= thresh, 1, 0)
            f1 = f1_score(y_true, y_pred, average="binary")
            f1_scores.append(f1)
        
        best_idx = np.argmax(f1_scores)
        return thresholds[best_idx], f1_scores[best_idx]

    def evaluate_model(self, y_true: np.ndarray, y_pred: np.ndarray, y_pred_proba: np.ndarray) -> dict:
        """Compute validation metrics (same metric set as the XGB trainer).

        Args:
            y_true: binary ground-truth labels.
            y_pred: thresholded binary predictions.
            y_pred_proba: positive-class probabilities (used for AUC).

        Returns:
            dict: {"auc", "f1", "precision", "recall"} rounded to 4 decimals,
                  or an empty dict if evaluation raised (logged).
        """
        try:
            # AUC is undefined when y_true has a single class; fall back to 0.5
            auc = roc_auc_score(y_true, y_pred_proba) if len(np.unique(y_true)) > 1 else 0.5
            # zero_division=0 returns the same 0.0 values sklearn already produced,
            # but silences the UndefinedMetricWarning when no positives are predicted.
            return {
                "auc": round(auc, 4),
                "f1": round(f1_score(y_true, y_pred, average="binary", zero_division=0), 4),
                "precision": round(precision_score(y_true, y_pred, average="binary", zero_division=0), 4),
                "recall": round(recall_score(y_true, y_pred, average="binary", zero_division=0), 4)
            }
        except Exception as e:
            logger.error(f"[{self.label_col}]模型评估失败: {str(e)}")
            return {}

    def train(self) -> dict:
        """Training entry point (continued training supported; structure aligned with the XGB trainer).

        Pipeline: init data -> optionally load historical model -> resolve
        features & class weight -> configure/reuse params -> build datasets ->
        train with early stopping -> pick optimal threshold -> evaluate ->
        persist model and bookkeeping.

        Returns:
            dict: on success {"status": "success", "label_col", "metrics",
                  "model_path", "version", "device", "total_iterations",
                  "best_iteration"}; on failure {"status": "fail",
                  "label_col", "error"}.
        """
        logger.info(f"开始训练[LightGBM]- 目标标签: {self.label_col}, 设备: {self.lgb_device}")
        self.train_process_info["status"] = "running"
        self.train_process_info["device"] = self.lgb_device
        self._save_train_process_info()
        
        try:
            # 1. Initialize data; abort early (and persist the failure) if it fails
            if not self._init_data():
                self.train_process_info["status"] = "fail"
                self.train_process_info["error"] = "数据初始化失败"
                self._save_train_process_info()
                return {"status": "fail", "label_col": self.label_col, "error": "数据初始化失败"}

            # 2. Load the historical model (continued-training support);
            #    failure is tolerated and falls back to fresh training
            if self.check_version_exists:
                self._load_model()
            logger.info(f'self.check_version_exists: {self.check_version_exists}')

            # 3. Resolve the feature column list (fall back to DataFrame columns)
            feature_cols = self.feature_cols if (hasattr(self, 'feature_cols') and self.feature_cols) else []
            if not feature_cols and hasattr(self.X_train, 'columns'):
                feature_cols = list(self.X_train.columns)
            logger.info(f"[{self.label_col}]训练特征列数: {len(feature_cols)}")

            # 4. Class weight: neg/pos ratio, capped at 2.8; 1.0 when there are no positives
            pos_count = len(self.y_train[self.y_train == 1])
            neg_count = len(self.y_train[self.y_train == 0])
            pos_weight = min(2.8, neg_count / pos_count) if pos_count > 0 else 1.0
            logger.info(f"[{self.label_col}]类别权重: 正样本={pos_count}, 负样本={neg_count}, 权重={pos_weight:.2f}")

            # 5. Model parameter configuration (reuse historical params when continuing)
            if self.check_version_exists and "model_params" in self.train_process_info:
                # Continued training: reuse historical parameters.
                # NOTE(review): assumes the saved params contain "seed" (KeyError
                # otherwise) and keeps the historical scale_pos_weight instead of
                # the freshly computed pos_weight above — confirm both are intended.
                lgb_params = self.train_process_info["model_params"]
                label_seed = lgb_params["seed"]
                lgb_params["device"] = self.lgb_device  # keep the device consistent
                logger.info(f"[{self.label_col}]复用历史LGB参数, seed={label_seed}, 续训模式开启")
            else:
                # Fresh-training parameters; per-label seed offset for model diversity.
                # NOTE(review): built-in hash() on a str is salted per process
                # (PYTHONHASHSEED), so this seed is NOT reproducible across runs —
                # consider a stable hash if reproducibility matters.
                label_seed = SSQ_CONFIG.get("random_seed", 42) + hash(self.label_col) % 1000
                lgb_params = {
                    "objective": "binary",
                    "metric": ["auc", "binary_logloss"],
                    "boosting_type": "gbdt",
                    "learning_rate": 0.06,
                    "max_depth": 9,
                    "scale_pos_weight": pos_weight,
                    "subsample": 0.7,
                    "colsample_bytree": 0.7,
                    "colsample_bynode": 0.8,
                    "seed": label_seed,
                    "verbosity": 0,
                    "device": self.lgb_device,
                    "max_bin": 256,
                    "num_leaves": 63,
                    "min_child_samples": 8,
                    "reg_alpha": 0.05,
                    "reg_lambda": 0.5,
                    "bagging_freq": 3,
                }
            
            # Persist the effective parameters (enables future continued training)
            self.train_process_info["model_params"] = lgb_params
            self._save_train_process_info()

            # 6. Build LightGBM datasets (raw data kept for later predict calls)
            train_data = lgb.Dataset(self.X_train, label=self.y_train, free_raw_data=False)
            val_data = lgb.Dataset(self.X_val, label=self.y_val, reference=train_data, free_raw_data=False)
            logger.info(f"[{self.label_col}]数据集构建完成, 训练集shape: {self.X_train.shape}")

            # 7. Early-stopping / logging / evaluation-recording callbacks
            evals_result = {}
            early_stop_rounds = SSQ_CONFIG['train_config'].get("early_stopping_rounds", 30)
            callbacks = [
                lgb.early_stopping(stopping_rounds=early_stop_rounds, min_delta=0.0001, verbose=False),
                lgb.log_evaluation(period=20),
                lgb.record_evaluation(evals_result)  # key: record eval results via callback
            ]
            logger.info(f"[{self.label_col}]早停配置: {early_stop_rounds}轮无提升则停止, seed={label_seed}")

            # 8. Train (continuing from the loaded Booster when available)
            if self.check_version_exists and self.model is not None:
                history_iter = self.train_process_info["training_info"].get("total_iterations", 0)
                logger.info(f"[{self.label_col}]续训: 历史已训练{history_iter}棵树")

            
            self.model = lgb.train(
                params=lgb_params,
                train_set=train_data,
                num_boost_round=1200,
                valid_sets=[val_data],
                callbacks=callbacks,
                init_model=self.model if (self.check_version_exists and self.model is not None) else None,
            )
            # evals_result has now been filled by the callback (format compatible with XGBoost)
            self.train_process_info["evals_result"] = evals_result

            # 9. Iteration accounting (LGB-specific, output fields aligned with XGB).
            # NOTE(review): Booster.best_iteration is an int, never None, so this
            # branch is always taken; whether the +1 "0-based to 1-based" conversion
            # is correct depends on the installed LightGBM version's semantics —
            # verify against the lgb.train / early_stopping documentation.
            total_trees = self.model.num_trees()  # total number of trees in the Booster
            if self.model.best_iteration is not None:
                best_iteration_1based = self.model.best_iteration + 1  # 0-based -> 1-based
                total_iterations = best_iteration_1based
            else:
                best_iteration_1based = None
                total_iterations = total_trees

            # Persist intermediate training info (field names identical to XGB)
            self.train_process_info["training_info"] = {
                "total_iterations": total_iterations,
                "best_iteration": self.model.best_iteration,
                "best_iteration_1based": best_iteration_1based,
                "current_iteration": total_trees,  # aligned with XGB
                "total_trees": total_trees,
                "early_stop_rounds": early_stop_rounds,
                "num_boost_round": 1200,
            }
            self.train_process_info["evals_result"] = evals_result
            self._save_train_process_info()

            logger.info(f"[{self.label_col}]训练完成, 最佳迭代轮次(0基): {self.model.best_iteration}, "
                        f"最佳AUC: {self.model.best_score['valid_0']['auc']:.4f}")

            # 10. Optimal-threshold search on the validation set, then evaluation
            y_pred_proba = self.model.predict(self.X_val, num_iteration=self.model.best_iteration)
            threshold, best_f1 = self.find_optimal_threshold(self.y_val, y_pred_proba)
            logger.info(f"[{self.label_col}]最优阈值: {threshold:.4f}(F1={best_f1:.4f})")
            
            self._save_threshold({
                "optimal_threshold": round(threshold, 4),
                "best_f1_score": round(best_f1, 4)
            })
            
            y_pred = np.where(y_pred_proba >= threshold, 1, 0)
            metrics = self.evaluate_model(self.y_val, y_pred, y_pred_proba)
            # NOTE(review): evaluate_model returns {} on failure, which would raise
            # KeyError on the next line — confirm that is the intended fail-fast.
            logger.info(f"[{self.label_col}]评估结果: AUC={metrics['auc']:.4f}, F1={metrics['f1']:.4f}")

            # 11. Optional feature-importance logging (gain-based top 5)
            if SSQ_CONFIG.get("log_feature_importance", False):
                importance = self.model.feature_importance(importance_type='gain')
                top_indices = np.argsort(importance)[-5:][::-1]
                top_feats = [(self.feature_cols[i], importance[i]) for i in top_indices if i < len(self.feature_cols)]
                logger.info(f"[{self.label_col}]Top5特征(增益): {top_feats}")

            # 12. Persist the trained model via the shared model_utils helper
            model_name = f"lgb_{self.label_col}"
            save_path = model_utils.save_model(
                model=self.model,
                model_name=model_name,
                model_type="lgb",
                feature_cols=feature_cols,
                model_params=lgb_params,
                threshold=threshold,
                version=self.train_version
            )
            
            # 13. Persist final status, path and metrics
            self.train_process_info["model_path"] = save_path
            self.train_process_info["evaluation_metrics"] = metrics
            self.train_process_info["status"] = "success"
            self._save_train_process_info()

            if save_path:
                logger.info(f"[{self.label_col}]模型保存成功: {save_path}")
            else:
                logger.warning(f"[{self.label_col}]模型保存失败")
        
            # Return the result in the same shape as the XGB trainer
            logger.info(f"[LGB-def train()]-[{self.label_col}]训练完成, 返回设备: {self.lgb_device}")
            return {
                "status": "success",
                "label_col": self.label_col,
                "metrics": metrics,
                "model_path": save_path,
                "version": self.train_version,
                "device": self.lgb_device,
                "total_iterations": total_iterations,
                "best_iteration": self.model.best_iteration,
            }

        except Exception as e:
            # Failure path (identical to the XGB trainer): log, persist, return
            error_trace = traceback.format_exc()
            logger.error(f"[LGB-def train()]-[{self.label_col}]训练失败: {str(e)}\n{error_trace}")
            
            self.train_process_info["status"] = "fail"
            self.train_process_info["error"] = str(e)
            self.train_process_info["error_traceback"] = error_trace
            self._save_train_process_info()
            
            return {
                "status": "fail",
                "label_col": self.label_col,
                "error": f"{str(e)}\n{error_trace}"
            }
    
    @staticmethod
    def print_training_summary(all_results: list):
        """Log and print a summary table of LightGBM training results (same layout as XGB).

        Args:
            all_results: list of result dicts as returned by LGBModelTrainer.train().
        """
        logger.info("="*100)
        logger.info("                      LightGBM 训练结果汇总表格")
        logger.info("="*100)

        # Guard: an empty result list would otherwise raise KeyError below when
        # rounding columns of an empty (column-less) DataFrame.
        if not all_results:
            logger.warning("print_training_summary: 无训练结果可汇总")
            print("print_training_summary: 无训练结果可汇总")
            return

        summary_data = []
        for res in all_results:
            metrics = res.get("metrics", {})
            summary_data.append({
                "标签列": res["label_col"],
                "训练版本": res.get("version", "unknown"),
                "训练设备": res.get("device", "cpu"),
                "累计总树数": res.get("total_iterations", "N/A"),
                "最佳树数量": res.get("best_iteration", "N/A"),
                "AUC": metrics.get("auc", 0.0),
                "F1分数": metrics.get("f1", 0.0),
                "精确率": metrics.get("precision", 0.0),
                "召回率": metrics.get("recall", 0.0),
                "训练状态": res["status"]
            })

        summary_df = pd.DataFrame(summary_data)
        numeric_cols = ["AUC", "F1分数", "精确率", "召回率"]
        for col in numeric_cols:
            summary_df[col] = summary_df[col].round(4)

        logger.info("\n" + summary_df.to_string(index=False, col_space=10))

        # Aggregate statistics over all results (averages over successful runs only)
        total = len(all_results)
        success_count = sum(1 for res in all_results if res["status"] == "success")
        success_rate = (success_count / total) * 100 if total > 0 else 0.0
        success_df = summary_df[summary_df["训练状态"] == "success"]
        avg_auc = success_df["AUC"].mean() if success_count > 0 else 0.0
        avg_f1 = success_df["F1分数"].mean() if success_count > 0 else 0.0

        logger.info("-"*100)
        logger.info(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        logger.info(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        logger.info("="*100 + "\n")

        # Console output mirrors the log
        print("="*100)
        print("                      LightGBM 训练结果汇总表格")
        print("="*100)
        print(summary_df.to_string(index=False, col_space=8))
        print("-"*100)
        print(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        print(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        print("="*100 + "\n")


if __name__ == "__main__":
    # Smoke test: run a first training pass, then a continued-training pass
    # against the version it produced, and print the combined summary.
    try:
        n_features = 50
        sample_data = {
            "X_train": np.random.rand(1000, n_features),
            "X_val": np.random.rand(200, n_features),
            "y_train": np.random.randint(0, 2, 1000),
            "y_val": np.random.randint(0, 2, 200),
            "feature_cols": [f"feat_{i}" for i in range(n_features)],
            "threshold": 0.5,
        }

        # First training pass
        logger.info("====== 首次训练 ======")
        first_trainer = LGBModelTrainer(label_col="r1_next", data_dict=sample_data)
        first_result = first_trainer.train()

        # Continued-training pass reusing the version produced above
        logger.info("\n====== 续训测试 ======")
        retrainer = LGBModelTrainer(
            label_col="r1_next",
            data_dict=sample_data,
            load_version=first_result["version"],
        )
        retrain_result = retrainer.train()

        LGBModelTrainer.print_training_summary([first_result, retrain_result])
    except Exception as e:
        logger.critical(f"测试失败: {str(e)}\n{traceback.format_exc()}")