import os
import traceback
import zlib
from typing import Dict, Any

import joblib
import numpy as np
import pandas as pd
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import roc_auc_score, f1_score, precision_score, recall_score
from sklearn.preprocessing import StandardScaler

from config.ssq_config import logger, SSQ_CONFIG
import trainer.model_train.model_utils as model_utils
from trainer.model_train.common_train import BaseModelTrainer


class LogisticRegressionTrainer(BaseModelTrainer):
    """逻辑回归模型训练器(完全对齐XGB结构, 继承公共基类)"""
    def __init__(self, label_col: str, data_dict: Dict, load_version: str = None, run_type: str = 'train'):
        """Initialize the logistic-regression trainer.

        Args:
            label_col: Target label column this model predicts.
            data_dict: Pre-split data ("X_train", "X_val", "y_train", "y_val",
                "feature_cols", "threshold").
            load_version: Historical version to load for resumed training
                (None = fresh training).
            run_type: Run mode, forwarded to the base class (default 'train').
        """
        # BUGFIX: the base class was previously initialized with
        # model_type="lstm", mislabeling this trainer and risking a collision
        # with the real LSTM trainer's artifact directory. This trainer is
        # logistic regression, so register it as "lr" — consistent with the
        # train_process_info["model_type"] marker below and the resume check
        # in train(). NOTE(review): versions saved under the old "lstm" label
        # will not be found after this fix — confirm migration if needed.
        super().__init__(label_col, data_dict, model_type="lr", load_version=load_version, run_type=run_type)

        # Raw and scaled data holders, populated by _init_data()/train().
        self.X_train = None
        self.X_val = None
        self.X_train_scaled = None
        self.X_val_scaled = None
        self.y_train = None
        self.y_val = None
        self.feature_cols = None
        self.label_threshold = None
        self.scaler = StandardScaler()  # linear models require standardized inputs
        self.lr_device = "cpu"  # LR always runs on CPU
        # Storage for intermediate training parameters.
        self.train_process_info["training_info"] = self.train_process_info.get("training_info", {})
        self.train_process_info["model_type"] = "lr"  # model-type marker used by the resume-training check

    def _load_model(self) -> bool:
        """Load a previously saved LR model (and its scaler) for resumed training.

        Looks up the path recorded in train_process_info; when missing or
        stale, falls back to the first ``.pkl`` file in the model directory.

        Returns:
            bool: True when a model was loaded, False when no usable model
            file exists or loading raised (error is logged with traceback).
        """
        try:
            candidate_path = self.train_process_info.get("model_path", "")
            if not candidate_path or not os.path.exists(candidate_path):
                # Fall back to scanning the model directory for a .pkl file.
                pkl_files = [name for name in os.listdir(self.model_process_dir) if name.endswith(".pkl")]
                if not pkl_files:
                    logger.warning(f"[{self.label_col}]未找到LR模型文件, 将重新训练")
                    return False
                candidate_path = os.path.join(self.model_process_dir, pkl_files[0])

            self.model = model_utils.load_model(candidate_path, model_type="sklearn")

            # Restore the fitted scaler so resumed training preprocesses
            # data exactly like the original run.
            scaler_path = candidate_path.replace(".pkl", "_scaler.pkl")
            if os.path.exists(scaler_path):
                self.scaler = model_utils.load_model(scaler_path, model_type="sklearn")
                logger.info(f"[{self.label_col}]LR标准化器加载成功: {scaler_path}")
            else:
                logger.warning(f"[{self.label_col}]未找到历史标准化器, 将重新初始化")

            # Resume sanity check: warn when the historical run carries no
            # model parameters (training will fall back to fresh parameters).
            if self.check_version_exists and "model_params" not in self.train_process_info:
                logger.warning(f"[{self.label_col}]历史训练信息中未找到模型参数, 将使用新参数续训")

            logger.info(f"[{self.label_col}]LR模型加载成功: {candidate_path}")
            return True
        except Exception as e:
            logger.error(f"[{self.label_col}]加载LR模型失败: {str(e)}\n{traceback.format_exc()}")
            self.model = None
            return False

    def _init_data(self) -> bool:
        """Populate train/val arrays from the data dict and persist data stats.

        On resumed training, validates that the current feature columns match
        the historical ones (count and names) and re-orders them to the
        historical order so the model sees a consistent feature layout.

        Returns:
            bool: True on success, False when anything raises (the error is
            logged with a full traceback).
        """
        try:
            # Pull the pre-split data out of the data dict.
            self.X_train = self.data_dict["X_train"]
            self.X_val = self.data_dict["X_val"]
            self.y_train = self.data_dict["y_train"]
            self.y_val = self.data_dict["y_val"]
            self.feature_cols = self.data_dict["feature_cols"]
            self.label_threshold = self.data_dict["threshold"]

            logger.info(f"[{self.label_col}]数据形状: X_train={self.X_train.shape}, X_val={self.X_val.shape}")
            logger.info(f"[{self.label_col}]标签阈值: {self.label_threshold}")

            # Resumed training: verify feature consistency and align order
            # with the historical feature list (skipped on a fresh run).
            if self.check_version_exists:
                saved_feat_cols = self.train_process_info["feature_info"].get("feature_columns", [])
                logger.info(f'saved_feat_cols:{saved_feat_cols}')
                logger.info(f'当前原始特征列:{self.feature_cols}')

                # Count check first, then exact name-set check.
                # BUGFIX: error message previously said "__init_data()"; the
                # method is "_init_data()".
                if len(saved_feat_cols) != len(self.feature_cols):
                    raise ValueError(f"_init_data(): 特征列不一致！历史{len(saved_feat_cols)}个, 当前{len(self.feature_cols)}个")

                if set(saved_feat_cols) != set(self.feature_cols):
                    missing_in_current = set(saved_feat_cols) - set(self.feature_cols)
                    missing_in_saved = set(self.feature_cols) - set(saved_feat_cols)
                    raise ValueError(
                        f"特征列名不一致！历史有但当前没有: {missing_in_current}\n"
                        f"当前有但历史没有: {missing_in_saved}"
                    )

                # Re-order to the historical column order. A set makes the
                # membership test O(1) instead of O(n) per column.
                current_cols = set(self.feature_cols)
                self.feature_cols = [col for col in saved_feat_cols if col in current_cols]
                logger.info(f"续训场景 - 特征列顺序已对齐：按历史顺序排列，共{len(self.feature_cols)}个特征")
            else:
                # Fresh run: keep the original feature column order untouched.
                logger.info(f"非续训场景 - 保留原始特征列：共{len(self.feature_cols)}个特征")

            # Compute class-balance statistics and persist via base-class hooks.
            train_pos_count = int(np.sum(self.y_train == 1))
            train_neg_count = int(np.sum(self.y_train == 0))
            val_pos_count = int(np.sum(self.y_val == 1))
            val_neg_count = int(np.sum(self.y_val == 0))
            data_stats = {
                "train_set": {
                    "sample_count": len(self.X_train),
                    "feature_count": len(self.feature_cols),
                    "positive_count": train_pos_count,
                    "negative_count": train_neg_count,
                    "positive_ratio": round(train_pos_count / len(self.y_train), 4) if len(self.y_train) else 0.0
                },
                "val_set": {
                    "sample_count": len(self.X_val),
                    "feature_count": len(self.feature_cols),
                    "positive_count": val_pos_count,
                    "negative_count": val_neg_count,
                    "positive_ratio": round(val_pos_count / len(self.y_val), 4) if len(self.y_val) else 0.0
                }
            }
            self._save_data_statistics(data_stats)
            self._save_feature_columns(self.feature_cols)

            return True
        except Exception as e:
            logger.error(f"[{self.label_col}]数据初始化失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def find_optimal_threshold(self, y_true: np.ndarray, y_pred_proba: np.ndarray) -> tuple:
        """Scan thresholds in [0.1, 0.9) (step 0.01) and return the F1-maximizing one.

        Args:
            y_true: Ground-truth binary labels (0/1).
            y_pred_proba: Predicted positive-class probabilities, same length.

        Returns:
            tuple: (best_threshold, best_f1); ties keep the lowest threshold,
            matching the np.argmax semantics of the original implementation.
        """
        y_arr = np.asarray(y_true)
        proba = np.asarray(y_pred_proba)
        thresholds = np.arange(0.1, 0.9, 0.01)

        # Vectorized binary F1 over all thresholds at once: one
        # (n_thresholds, n_samples) comparison replaces ~80 separate sklearn
        # f1_score calls (which also emit zero-division warnings when a
        # threshold yields no positive predictions).
        # F1 = 2*TP / (2*TP + FP + FN); a zero denominator yields F1 = 0,
        # matching sklearn's zero_division behaviour for average="binary".
        preds = (proba[None, :] >= thresholds[:, None])
        pos_mask = (y_arr == 1)
        tp = preds[:, pos_mask].sum(axis=1)
        fp = preds[:, ~pos_mask].sum(axis=1)
        fn = int(pos_mask.sum()) - tp
        denom = 2 * tp + fp + fn
        f1_scores = np.divide(
            2.0 * tp, denom,
            out=np.zeros(len(thresholds), dtype=float),
            where=denom > 0,
        )

        best_idx = int(np.argmax(f1_scores))
        return thresholds[best_idx], f1_scores[best_idx]

    def evaluate_model(self, y_true: np.ndarray, y_pred: np.ndarray, y_pred_proba: np.ndarray) -> dict:
        """Compute AUC / F1 / precision / recall on validation predictions.

        Args:
            y_true: Ground-truth binary labels.
            y_pred: Hard predictions produced by thresholding.
            y_pred_proba: Two-column probability matrix; column 1 holds the
                positive-class probability used for AUC.

        Returns:
            dict: The four metrics rounded to 4 decimals (AUC falls back to
            0.5 when y_true contains a single class); empty dict on failure.
        """
        try:
            # AUC is undefined when only one class is present; fall back to 0.5.
            if len(np.unique(y_true)) > 1:
                auc = roc_auc_score(y_true, y_pred_proba[:, 1])
            else:
                auc = 0.5
            raw_metrics = {
                "auc": auc,
                "f1": f1_score(y_true, y_pred, average="binary"),
                "precision": precision_score(y_true, y_pred, average="binary"),
                "recall": recall_score(y_true, y_pred, average="binary"),
            }
            return {name: round(value, 4) for name, value in raw_metrics.items()}
        except Exception as e:
            logger.error(f"[{self.label_col}]模型评估失败: {str(e)}\n{traceback.format_exc()}")
            return {}

    def train(self) -> Dict[str, Any]:
        """Run the full LR training pipeline and return an XGB-aligned result dict.

        Steps: init data -> binarize labels -> class weights -> resolve
        parameters (fresh vs. resumed) -> constant-feature pruning + random
        sampling -> standardization -> fit -> optimal threshold -> evaluation
        -> persist model, scaler and feature mask.

        Returns:
            Dict[str, Any]: {"status": "success", "metrics": ..., "model_path":
            ..., ...} on success; {"status": "fail", "error": ...} on failure
            (the error and traceback are also persisted to train_process_info).
        """
        logger.info(f"开始训练[逻辑回归-LogisticRegression] - 目标标签: {self.label_col}, 设备: {self.lr_device}")
        self.train_process_info["status"] = "running"
        self.train_process_info["device"] = self.lr_device
        self._save_train_process_info()  # persist process info via the base class

        try:
            # 1. Initialize data.
            if not self._init_data():
                return {"status": "fail", "label_col": self.label_col, "error": "数据初始化失败"}

            # 2. Binarize labels when they are not already {0} or {0, 1}.
            # NOTE(review): an all-positive label set (unique == [1]) also
            # falls into this branch and gets re-thresholded — confirm intended.
            unique_y = np.unique(self.y_train)
            if not np.array_equal(unique_y, [0]) and not np.array_equal(unique_y, [0, 1]):
                logger.warning(f"[{self.label_col}]标签非二值化！当前唯一值: {unique_y}")
                self.y_train = np.where(self.y_train >= self.label_threshold, 1, 0)
                self.y_val = np.where(self.y_val >= self.label_threshold, 1, 0)

            # 3. Class weights: up-weight positives by the negative/positive ratio.
            pos_count = len(self.y_train[self.y_train == 1])
            neg_count = len(self.y_train[self.y_train == 0])
            logger.info(f"[{self.label_col}]类别分布: 正样本={pos_count}, 负样本={neg_count}")
            class_weight = {0: 1.0, 1: neg_count / pos_count} if pos_count > 0 else None

            # 4. LR parameter resolution (fresh run vs. resume).
            logger.info(f'self.check_version_exists: {self.check_version_exists}')
            logger.info(f'self.train_process_info: {self.train_process_info}')
            label_seed = SSQ_CONFIG.get("random_seed", 42)
            if not self.check_version_exists or "model_params" not in self.train_process_info:
                # Fresh run: derive a per-label seed.
                # BUGFIX: the built-in hash() is salted per process
                # (PYTHONHASHSEED), so the old hash(label_col)-based seed
                # changed between runs; zlib.crc32 is stable and deterministic.
                label_seed += zlib.crc32(self.label_col.encode("utf-8")) % 1000
                lr_params = {
                    "penalty": "l2",
                    "C": 0.5,
                    "class_weight": class_weight,
                    "solver": "saga",
                    "max_iter": 1500,
                    "tol": 1e-4,
                    "random_state": label_seed,
                    "verbose": 0,
                    "n_jobs": 1,
                }
                logger.info(f"[{self.label_col}]使用新训练参数, 专属seed={label_seed}")
            else:
                # Resume: reuse historical parameters, tolerating missing fields.
                lr_params = self.train_process_info["model_params"]
                # JSON round-trips turn class_weight keys into strings; restore ints.
                if "class_weight" in lr_params and lr_params["class_weight"] is not None:
                    lr_params["class_weight"] = {int(k): v for k, v in lr_params["class_weight"].items()}
                # Refuse parameters saved by a different model type.
                if self.train_process_info.get("model_type") != "lr":
                    raise ValueError(f"历史参数属于{self.train_process_info.get('model_type')}, 与LR不兼容")
                label_seed = lr_params.get("random_state", label_seed)  # guard against KeyError
                logger.info(f"[{self.label_col}]复用历史LR参数, seed={label_seed}, 续训模式开启")

            # Persist the full parameter set immediately so a crash mid-training
            # does not lose them (enables resume).
            self.train_process_info["model_params"] = lr_params
            self._save_train_process_info()

            # 5. Feature pruning: drop (near-)constant columns.
            logger.info(f"[{self.label_col}]特征预处理：删除常量特征+随机采样...")
            train_std = np.std(self.X_train, axis=0)
            non_constant_cols = train_std > 1e-6
            self.X_train = self.X_train[:, non_constant_cols]
            self.X_val = self.X_val[:, non_constant_cols]
            # BUGFIX: keep feature names aligned with the pruned matrix. The
            # old code left feature_cols untouched here, so the sampling
            # indices below pointed at the wrong names whenever constant
            # columns were removed (the "if i < len(...)" guard hid this).
            self.feature_cols = [col for col, keep in zip(self.feature_cols, non_constant_cols) if keep]
            logger.info(f"[{self.label_col}]删除常量特征后：剩余{self.X_train.shape[1]}个特征")

            # Random feature sampling (ratio 1.0 keeps every column, only permuted).
            np.random.seed(label_seed)
            feature_count = self.X_train.shape[1]
            sample_ratio = 1.0  # changed from 0.8 to 1.0: no feature sub-sampling
            sample_cols = np.random.choice(
                feature_count, size=int(feature_count * sample_ratio), replace=False
            )
            self.X_train = self.X_train[:, sample_cols]
            self.X_val = self.X_val[:, sample_cols]
            self.feature_cols = [self.feature_cols[i] for i in sample_cols]
            # BUGFIX: the old message hard-coded "80%"; report the real ratio.
            logger.info(f"[{self.label_col}]随机采样{sample_ratio:.0%}后：剩余{self.X_train.shape[1]}个特征")

            # 6. Standardization (reuse the historical scaler when resuming).
            # Check mean_ existence first to avoid touching an unfitted scaler.
            if (self.check_version_exists
                and hasattr(self, 'scaler')
                and hasattr(self.scaler, 'mean_')
                and self.scaler.mean_ is not None):

                # Resume: transform with the previously fitted scaler.
                self.X_train_scaled = self.scaler.transform(self.X_train)
                self.X_val_scaled = self.scaler.transform(self.X_val)
                logger.info(f"[{self.label_col}]使用历史标准化器完成数据标准化")
            else:
                # Fresh run: fit a new scaler (creates mean_ / var_).
                self.X_train_scaled = self.scaler.fit_transform(self.X_train)
                self.X_val_scaled = self.scaler.transform(self.X_val)
                logger.info(f"[{self.label_col}]拟合新标准化器完成数据标准化")

            # 7. Fit the model.
            logger.info(f"[{self.label_col}]开始LR训练(迭代次数上限={lr_params['max_iter']})")
            if self.model is None:
                self.model = LogisticRegression(**lr_params)
                logger.info(f"[{self.label_col}]初始化新LR模型")
            else:
                # NOTE(review): without warm_start=True, sklearn re-fits from
                # scratch here, so "resume" actually re-trains rather than
                # continuing from the loaded coefficients — confirm intent.
                logger.info(f"[{self.label_col}]使用已有模型续训")

            self.model.fit(self.X_train_scaled, self.y_train)
            # int() keeps the value JSON-serializable (n_iter_ holds numpy ints).
            actual_iter = int(self.model.n_iter_[0])
            logger.info(f"[{self.label_col}]LR训练完成：实际迭代次数={actual_iter}")

            # Persist intermediate training info.
            self.train_process_info["training_info"] = {
                "max_iter": lr_params["max_iter"],
                "actual_iterations": actual_iter,
                "feature_sample_ratio": sample_ratio,
                "n_non_constant_features": int(np.sum(non_constant_cols)),
                "final_feature_count": self.X_train_scaled.shape[1],
                "random_seed": label_seed
            }
            self._save_train_process_info()

            # 8. Optimal decision threshold on validation probabilities.
            logger.info(f"[{self.label_col}]计算预测概率与最优阈值...")
            y_pred_proba = self.model.predict_proba(self.X_val_scaled)[:, 1]
            threshold, best_f1 = self.find_optimal_threshold(self.y_val, y_pred_proba)
            logger.info(f"[{self.label_col}]最优阈值：{threshold:.4f}(F1={best_f1:.4f})")

            self._save_threshold({
                "optimal_threshold": round(threshold, 4),
                "best_f1_score": round(best_f1, 4)
            })

            # 9. Evaluation metrics.
            y_pred = np.where(y_pred_proba >= threshold, 1, 0)
            y_pred_proba = np.column_stack((1 - y_pred_proba, y_pred_proba))
            metrics = self.evaluate_model(self.y_val, y_pred, y_pred_proba)
            if not metrics:
                # BUGFIX: evaluate_model returns {} on failure; fail with a
                # clear message instead of the opaque KeyError the old
                # metrics['auc'] access produced.
                raise RuntimeError(f"[{self.label_col}]模型评估失败, 未得到评估指标")
            self.train_process_info["evaluation_metrics"] = metrics
            logger.info(f"[{self.label_col}]评估结果：AUC={metrics['auc']:.4f}, F1={metrics['f1']:.4f}")

            # 10. Feature importance (|coef| per input feature).
            if SSQ_CONFIG.get("log_feature_importance", False):
                coef_abs = np.abs(self.model.coef_[0])
                importance_dict = dict(zip(self.feature_cols, coef_abs))
                top_feats = sorted(importance_dict.items(), key=lambda x: x[1], reverse=True)[:5]
                logger.info(f"[{self.label_col}]Top5重要特征：{top_feats}")

            # 11. Persist model artifacts (XGB-aligned), plus scaler and mask.
            model_name = f"lr_{self.label_col}"
            logger.info(f"[{self.label_col}]保存模型：{model_name}")
            lr_model_params = {
                **lr_params,
                "feature_sample_ratio": sample_ratio,
                "label_threshold": self.label_threshold,
                "n_non_constant_features": int(np.sum(non_constant_cols)),
                "final_feature_count": self.X_train_scaled.shape[1]
            }

            save_path = model_utils.save_model(
                model=self.model,
                model_name=model_name,
                model_type="lr",
                feature_cols=self.feature_cols,
                model_params=lr_model_params,
                threshold=threshold,
                version=self.train_version,
            )

            # Persist the non-constant feature mask (the fusion stage needs it
            # to reproduce the same column pruning).
            try:
                mask_save_path = save_path.replace(".pkl", "_non_constant_mask.pkl")
                joblib.dump(non_constant_cols, mask_save_path)
                logger.info(f"[{self.label_col}]非常量特征掩码保存成功: {mask_save_path}")
            except Exception as e:
                raise RuntimeError(f"[{self.label_col}]非常量特征掩码保存失败: {str(e)}\n{traceback.format_exc()}")
            # Record the mask path in the training process info.
            self.train_process_info["non_constant_mask_path"] = mask_save_path

            # Persist the fitted scaler next to the model file.
            try:
                scaler_save_path = save_path.replace(".pkl", "_scaler.pkl")
                joblib.dump(self.scaler, scaler_save_path)
                logger.info(f"[{self.label_col}]标准化器保存成功: {scaler_save_path}")
            except Exception as e:
                # BUGFIX: the old message used "\{" (stray backslash) instead
                # of "\n" before the traceback.
                raise RuntimeError(f"[{self.label_col}]标准化器保存失败: {str(e)}\n{traceback.format_exc()}")

            # 12. Persist final process info.
            self.train_process_info["model_path"] = save_path
            self.train_process_info["scaler_path"] = scaler_save_path  # record scaler path
            self.train_process_info["status"] = "success"
            self._save_train_process_info()

            # 13. Result dict, fully aligned with the XGB trainer's format.
            logger.info(f"[LR-def train()]-[{self.label_col}]训练完成, 返回设备: {self.lr_device}")
            return {
                "status": "success",
                "label_col": self.label_col,
                "metrics": metrics,
                "model_path": save_path,
                "version": self.train_version,
                "device": self.lr_device,
                "actual_iterations": actual_iter,
                "max_iter": lr_params["max_iter"]
            }

        except Exception as e:
            logger.error(f"[LR-def train()]-[{self.label_col}]训练失败: {str(e)}\n{traceback.format_exc()}")
            self.train_process_info["status"] = "fail"
            self.train_process_info["error"] = str(e)
            self.train_process_info["error_traceback"] = traceback.format_exc()  # keep full stack for diagnosis
            self._save_train_process_info()
            return {
                "status": "fail",
                "label_col": self.label_col,
                "error": str(e)
            }

    @staticmethod
    def print_training_summary(all_results: list):
        """Log and print a per-label training summary table (XGB-aligned format).

        Args:
            all_results: List of result dicts as returned by train(); each must
                contain "label_col" and "status", and may contain "metrics",
                "device", "version", "actual_iterations" and "max_iter".
        """
        logger.info("="*100)
        logger.info("                      逻辑回归 训练结果汇总表格")
        logger.info("="*100)

        summary_data = []
        for res in all_results:
            metrics = res.get("metrics", {})
            summary_data.append({
                "标签列": res["label_col"],
                "训练版本": res.get("version", "unknown"),
                "训练设备": res.get("device", "cpu"),
                "最大迭代次数": res.get("max_iter", "N/A"),
                "实际迭代次数": res.get("actual_iterations", "N/A"),
                "AUC": metrics.get("auc", 0.0),
                "F1分数": metrics.get("f1", 0.0),
                "精确率": metrics.get("precision", 0.0),
                "召回率": metrics.get("recall", 0.0),
                "训练状态": res["status"]
            })

        # BUGFIX: build the frame with explicit columns so an empty
        # all_results list no longer raises KeyError on the numeric columns.
        columns = ["标签列", "训练版本", "训练设备", "最大迭代次数", "实际迭代次数",
                   "AUC", "F1分数", "精确率", "召回率", "训练状态"]
        summary_df = pd.DataFrame(summary_data, columns=columns)
        numeric_cols = ["AUC", "F1分数", "精确率", "召回率"]
        for col in numeric_cols:
            # astype(float) also keeps .round() valid for the empty frame.
            summary_df[col] = summary_df[col].astype(float).round(4)

        logger.info("\n" + summary_df.to_string(index=False, col_space=10))

        # Aggregate statistics over successful runs only.
        total = len(all_results)
        success_count = sum(1 for res in all_results if res["status"] == "success")
        success_rate = (success_count / total) * 100 if total > 0 else 0.0
        avg_auc = summary_df[summary_df["训练状态"] == "success"]["AUC"].mean() if success_count > 0 else 0.0
        avg_f1 = summary_df[summary_df["训练状态"] == "success"]["F1分数"].mean() if success_count > 0 else 0.0

        logger.info("-"*100)
        logger.info(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        logger.info(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        logger.info("="*100 + "\n")

        # Mirror the summary to stdout.
        print("="*100)
        print("                      逻辑回归 训练结果汇总表格")
        print("="*100)
        print(summary_df.to_string(index=False, col_space=8))
        print("-"*100)
        print(f"训练统计: 总标签数={total} | 成功数={success_count} | 成功率={success_rate:.2f}%")
        print(f"平均性能(成功模型): 平均AUC={avg_auc:.4f} | 平均F1={avg_f1:.4f}")
        print("="*100 + "\n")


if __name__ == "__main__":
    # 示例运行代码(与XGB示例对齐)
    try:
        sample_data = {
            "X_train": np.random.rand(1000, 50),
            "X_val": np.random.rand(200, 50),
            "y_train": np.random.randint(0, 2, 1000),
            "y_val": np.random.randint(0, 2, 200),
            "feature_cols": [f"feat_{i}" for i in range(50)],
            "threshold": 0.5
        }
        
        # 首次训练
        logger.info("====== 首次训练 ======")
        trainer = LogisticRegressionTrainer(label_col="r1_next", data_dict=sample_data)
        result = trainer.train()
        version = result["version"]
        
        # 续训(加载已有版本)
        logger.info("\n====== 续训 ======")
        retrainer = LogisticRegressionTrainer(label_col="r1_next", data_dict=sample_data, load_version=version)
        retrain_result = retrainer.train()
        
        # 打印汇总
        LogisticRegressionTrainer.print_training_summary([result, retrain_result])
    except Exception as e:
        logger.critical(f"示例运行失败: {str(e)}\n{traceback.format_exc()}")
