import multiprocessing
from multiprocessing import Pool
import threading
import os
import os.path as osp
from typing import Optional, Dict, List
import traceback
import pandas as pd
import torch  # 新增: 引入torch控制GPU
import json
import trainer.model_train.model_utils as model_utils
from trainer.model_train.lgb_train import LGBModelTrainer
from trainer.model_train.xgb_train import XGBModelTrainer
from trainer.model_train.lstm_train import LSTMModelTrainer
from trainer.model_train.logistic_regression_train import LogisticRegressionTrainer
from trainer.model_train.random_forest_train import RandomForestTrainer
from config.ssq_config import SSQ_CONFIG, init_global_logger, logger

# 强制多进程使用spawn启动方式( 兼容CPU多进程) 
multiprocessing.set_start_method("spawn", force=True)

# 全局变量, 用于子进程共享data_dict(大型特征矩阵)
global_shared_data = None

def init_worker(data_dict):
    """Pool-worker initializer: inject the shared dataset into this process's global.

    Runs once per spawned child process; the (large) feature data is copied a
    single time at process start and then reused by every task the worker runs.

    :param data_dict: shared feature/label data produced by model_utils.init_dataset
    """
    global global_shared_data
    global_shared_data = data_dict  # copied once at worker start; reused by all tasks

'''
Key modification notes
1. Shared-data passing optimization: inject data via init_worker into a process global,
   avoiding repeated transfer of the large dataset through the task list
2. Task-list simplification: tasks carry only (label, idx), not data_dict, reducing memory overhead
3. Data-loading fix: load_shared_data uses the first label to fetch the shared data, valid for all labels
4. Other bug fixes: resolved undefined label_col / config_label_col issues
5. New multithreading support: model-level thread parallelism, label-level multiprocess training
6. Log separation: entry log (main.log) + one log per model (e.g. xgb_train.log)
'''

# ===================== 新增: 模型训练线程类(不侵入原有逻辑)=====================
class ModelTrainThread(threading.Thread):
    """Worker thread that runs one model's full training pipeline.

    The wrapped training function internally fans out to multiple processes
    (one per label); this thread only drives it and records the outcome.
    """

    def __init__(self, model_name: str, train_func):
        super().__init__()
        # Display name of the model (e.g. "XGBoost") used in log messages.
        self.model_name = model_name
        # Zero-argument callable that performs the training (e.g. train_xgb).
        self.train_func = train_func
        # Outcome flag; stays False until train_func reports success.
        self.success = False

    def run(self) -> None:
        """Invoke the training function and log start/end/crash events."""
        logger.info(f"[{self.model_name}训练线程]启动(线程ID: {threading.get_ident()})")
        try:
            self.success = self.train_func()
        except Exception as e:
            # Any uncaught exception marks this model as failed.
            logger.error(
                f"[{self.model_name}训练线程]崩溃: {str(e)}\n{traceback.format_exc()}"
            )
            self.success = False
            return
        if self.success:
            logger.info(f"[{self.model_name}训练线程]训练完成！")
        else:
            logger.error(f"[{self.model_name}训练线程]训练失败！")

class SSQMultiTrainer:
    """Train one model type across all SSQ labels.

    The shared feature dataset is loaded once in the parent process and
    injected into each spawned worker via the pool initializer; every label
    is then trained in its own worker process. Only LSTM may use the GPU —
    all other model types are forced onto CPU.
    """

    # Accept both the full model names used by the entry functions
    # (e.g. "XGBoost") and the short dispatch codes (e.g. "xgb").
    # BUG FIX: the entry functions pass full names while the worker dispatch
    # compared against short codes, so no trainer class ever matched.
    _MODEL_KEYS = {
        "xgboost": "xgb", "xgb": "xgb",
        "lightgbm": "lgb", "lgb": "lgb",
        "lstm": "lstm",
        "logisticregression": "lr", "lr": "lr",
        "randomforest": "rf", "rf": "rf",
    }
    # Dispatch table: short code -> trainer class. Also used when printing
    # the post-training summary, replacing the previous if/elif chains.
    _TRAINER_CLASSES = {
        "xgb": XGBModelTrainer,
        "lgb": LGBModelTrainer,
        "lstm": LSTMModelTrainer,
        "lr": LogisticRegressionTrainer,
        "rf": RandomForestTrainer,
    }

    def __init__(self, model_type: str = None, run_type: str = 'train'):
        """Initialize the trainer for one model type (non-LSTM forced to CPU).

        :param model_type: model name, full ("XGBoost") or short ("xgb") form
        :param run_type: data usage, either 'train' or 'predict'
        :raises ValueError: on an unknown run_type or an empty model_type
        """
        # Validate the data-usage flag.
        if run_type not in ["train", "predict"]:
            raise ValueError(f"数据类型错误！仅支持 'train' 或 'predict', 当前输入: {run_type}")
        if not model_type:
            # BUG FIX: the original default (None) crashed later with an
            # AttributeError on model_type.upper(); fail fast instead.
            raise ValueError("model_type 不能为空！支持: XGBoost/LightGBM/LSTM/LogisticRegression/RandomForest")

        self.run_type = run_type          # data usage: train / predict
        self.model_type = model_type      # caller-supplied name (logs / file names)
        # Canonical short code used for every internal comparison.
        self.model_key = self._MODEL_KEYS.get(model_type.lower(), model_type.lower())
        self.label_cols = SSQ_CONFIG['label_cols']
        # BUG FIX: the original nested single quotes inside a single-quoted
        # f-string — a SyntaxError on Python < 3.12.
        self.train_version = f"v{model_type.upper()}_{SSQ_CONFIG['train_version']}"
        self.results: List[Dict] = []

        # Choose the summary directory from config according to the data usage.
        self.summary_dir = SSQ_CONFIG["train_file"]["model_save_dir"]
        if self.run_type == "predict":
            self.summary_dir = SSQ_CONFIG["predict_file"]["model_save_dir"]

        # Only LSTM is allowed on GPU; everything else is forced to CPU.
        self.use_gpu = (self.model_key == "lstm") and torch.cuda.is_available()
        if self.use_gpu:
            self.gpu_id = 0  # extend here for multi-GPU (0, 1, ...)
            logger.info(f"LSTM启用GPU训练 (设备: cuda:{self.gpu_id}, 名称: {torch.cuda.get_device_name(self.gpu_id)})")
        else:
            logger.info(f"{self.model_type}强制使用CPU训练")

        # Process count: GPU pools stay small (avoid OOM); CPU pools scale
        # with the number of cores.
        self.max_processes = self._get_process_num()

    def _get_process_num(self) -> int:
        """Return the worker-process count for the pool, capped by label count."""
        cpu_count = multiprocessing.cpu_count()
        if self.model_key == "lstm":
            if self.use_gpu:
                # GPU mode: keep the pool small (<=4 on one card) to avoid OOM.
                return max(1, min(4, len(self.label_cols)))
            # LSTM on CPU is compute-heavy: use fewer processes.
            return max(1, min(cpu_count // 8, len(self.label_cols)))
        # Other model types run on CPU.
        return max(1, min(cpu_count // 4, len(self.label_cols)))

    def load_shared_data(self) -> bool:
        """Load the shared feature dataset once; all labels reuse it.

        :returns: True on success; False on any failure (logged, not raised)
        """
        try:
            # All labels share the same feature matrix and parameters, so the
            # first label is enough to drive the dataset initialization.
            sample_label = self.label_cols[0]
            logger.info(f"基于标签[{sample_label}]加载共享数据集...")

            # Shared X_train / X_val / feature_cols etc. come back in data_dict.
            success, data_dict = model_utils.init_dataset(
                run_type=self.run_type,
                label_col=sample_label,
                config_label_cols=SSQ_CONFIG['label_cols'],  # all configured label columns
                load_test=False,
                print_sample=False  # suppress sample printing to reduce IO
            )

            if not success:
                logger.error(f"共享数据集初始化失败(基于标签[{sample_label}])")
                return False

            # Publish to the module global so the pool initializer can pass
            # it on to the spawned workers.
            global global_shared_data
            global_shared_data = data_dict

            logger.info(f"共享数据集加载成功！包含关键数据: {list(data_dict.keys())}")
            logger.info(f"训练集样本数: {len(data_dict['X_train'])} | 特征数: {len(data_dict['feature_cols'])}")
            return True
        except Exception as e:
            logger.error(f"共享数据集加载崩溃: {str(e)}\n{traceback.format_exc()}")
            return False

    def train_single_label_worker(self, args: tuple) -> Optional[Dict]:
        """Train one label in a worker process (shared data from the global).

        :param args: (label_col, label_idx, load_version, run_type)
        :returns: summary dict on success, None on any failure
        """
        # The task tuple deliberately excludes data_dict — the worker reads
        # it from the process-global injected by init_worker.
        label_col, label_idx, load_version, run_type = args

        # Re-initialize a per-model logger in the child process. The
        # logger.remove() inside init_global_logger drops all existing sinks,
        # so output goes only to e.g. xgb_train.log, not main.log.
        init_global_logger(log_file=f"{self.model_type}_train.log", pid=os.getpid())

        logger.info(f"开始训练[{run_type}]的[{label_col}]标签的{self.model_type}模型(索引: {label_idx} | 设备: {'GPU' if self.use_gpu else 'CPU'})")

        try:
            # Fetch the shared data injected at worker start.
            global global_shared_data
            data_dict = global_shared_data
            if data_dict is None:
                logger.error(f"[{label_col}]未获取到共享数据, 训练终止")
                return None

            logger.info(f"multi_models_train.train_single_label_worker() [{label_col}]强制使用CPU训练, 模型类型: {self.model_type}")

            # Resolve the trainer class via the dispatch table (BUG FIX:
            # previously compared self.model_type against short codes only).
            trainer_cls = self._TRAINER_CLASSES.get(self.model_key)
            if trainer_cls is None:
                logger.error(f"不支持的模型类型: {self.model_type}")
                return None
            trainer = trainer_cls(
                label_col=label_col, data_dict=data_dict,
                load_version=load_version, run_type=run_type
            )

            # trainer.train() returns (on success) a dict like:
            #   {"status": "success", "label_col": ..., "metrics": ...,
            #    "device": ..., "version": ..., "model_path": ...,
            #    "best_ntree_limit": int, "total_trees": int}
            result = trainer.train()
            logger.info(f'self.model_type train() result: {result}')
            if result and result.get("status") == "success":
                # BUG FIX: removed the duplicated success log that hard-coded
                # "CPU"; keep the one reporting the actual training device.
                # (Single quotes inside the f-string: valid on Python < 3.12.)
                logger.info(f"[{label_col}]训练成功, AUC: {result['metrics']['auc']:.4f}(设备: {result['device']})")
                return {
                    "label_col": label_col,
                    "metrics": result["metrics"],
                    "status": "success",
                    "version": result.get("version"),
                    "device": result["device"]
                }
            else:
                logger.error(f"[{self.model_type}.{label_col}]训练返回失败结果,{traceback.format_exc()}")
                return None
        except Exception as e:
            logger.error(
                f"[{label_col}]训练崩溃: {str(e)}\n{traceback.format_exc()}"
            )
            return None

    def run(self, run_type: str = None) -> bool:
        """Train every label of this model type with a spawn process pool.

        :param run_type: overrides self.run_type for the workers; defaults to
            self.run_type when omitted (BUG FIX: the entry functions call
            run() without arguments, which previously forwarded None)
        :returns: True if the pipeline ran (individual labels may still fail)
        """
        logger.info(f"===== 开始{self.model_type}模型训练(共{len(self.label_cols)}个标签) =====")

        if run_type is None:
            run_type = self.run_type

        # 1. Load the shared data exactly once.
        if not self.load_shared_data():
            logger.critical("加载共享数据集失败, 终止训练流程！")
            return False

        # 2. Train all labels with a process pool.
        try:
            logger.info(f'采用{self.max_processes}个CPU进程开始训练(共享数据模式)')

            # spawn is mandatory for LSTM/CUDA and safe for the others.
            ctx = multiprocessing.get_context("spawn")

            # Minimal task tuples; the big data_dict travels via initargs.
            tasks = [(label, idx, self.train_version, run_type) for idx, label in enumerate(self.label_cols)]

            with ctx.Pool(
                processes=self.max_processes,
                maxtasksperchild=1,  # recycle each worker after one task (safer with GPU)
                initializer=init_worker,
                initargs=(global_shared_data,),
            ) as pool:
                raw_results = pool.map(self.train_single_label_worker, tasks)

            # 3. Collect the successful per-label results (see
            # train_single_label_worker for the dict layout).
            self.results = [res for res in raw_results if res is not None]
            success_rate = len(self.results) / len(self.label_cols) * 100
            logger.info(f"所有标签训练完成, 成功{len(self.results)}/{len(self.label_cols)}(成功率: {success_rate:.1f}%)")

            # 4. Print the per-model summary if the trainer provides one.
            trainer_cls = self._TRAINER_CLASSES.get(self.model_key)
            if trainer_cls is not None and hasattr(trainer_cls, "print_training_summary"):
                trainer_cls.print_training_summary(self.results)
            else:
                logger.warning(f"{self.model_type}训练器无汇总打印方法, 跳过")

            # 5. Persist the summary files.
            self._save_training_summary()
            return True
        except Exception as e:
            logger.critical(f"多进程训练崩溃: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_training_summary(self) -> None:
        """Save this model's summary CSV and a cross-model comparison report."""
        # Ensure the target directory exists before any write (robustness fix).
        os.makedirs(self.summary_dir, exist_ok=True)

        # Save this model's own training summary.
        current_summary_path = osp.join(self.summary_dir, f"{self.model_type}_training_summary.csv")
        summary_df = pd.DataFrame(self.results)
        summary_df.to_csv(current_summary_path, index=False, encoding='utf-8')
        logger.info(f"当前模型[{self.model_type}]训练汇总保存至: {current_summary_path}")

        # ==============================================
        # Cross-model comparison: read each model's summary file dynamically.
        # ==============================================
        try:
            # 1. Model types to compare, from config (fallback to the known set).
            model_types = SSQ_CONFIG.get('model_types', ['xgb', 'lgb', 'rf', 'lr', 'lstm'])
            all_model_dfs = []  # parsed per-model frames

            # 2. Load each model's summary file if present.
            for model_type in model_types:
                model_summary_path = osp.join(self.summary_dir, f"{model_type}_training_summary.csv")

                logger.warning(f"模型[{model_type}]的汇总文件路径为: {model_summary_path}")
                if not osp.exists(model_summary_path):
                    logger.warning(f"模型[{model_type}]的汇总文件不存在, 跳过对比: {model_summary_path}")
                    continue

                df = pd.read_csv(model_summary_path)

                # Skip files missing the required columns.
                required_cols = ["label_col", "metrics", "status"]
                if not all(col in df.columns for col in required_cols):
                    logger.warning(f"模型[{model_type}]的汇总文件缺少必要列(需包含{required_cols}), 跳过")
                    continue

                # 3. Parse the stringified metrics dict (handles single quotes
                # and Python literals by converting to JSON before json.loads —
                # safer than eval).
                def parse_metrics(metrics_str):
                    try:
                        safe_str = metrics_str.replace("'", '"').replace("None", "null").replace("True", "true").replace("False", "false")
                        metrics = json.loads(safe_str)
                        # Missing metrics default to 0.0.
                        return pd.Series([
                            metrics.get("auc", 0.0),
                            metrics.get("f1", 0.0),
                            metrics.get("precision", 0.0),
                            metrics.get("recall", 0.0)
                        ])
                    except Exception as e:
                        logger.warning(f"解析模型[{model_type}]的metrics失败: {e}, 指标设为0.0")
                        return pd.Series([0.0, 0.0, 0.0, 0.0])

                df[["auc", "f1", "precision", "recall"]] = df["metrics"].apply(parse_metrics)
                df["model_type"] = model_type  # tag rows with their model

                # Keep only the columns needed for the comparison.
                keep_cols = ["label_col", "model_type", "auc", "f1", "precision", "recall", "status"]
                all_model_dfs.append(df[keep_cols])

            # 4. Merge; bail out when nothing usable was found.
            if not all_model_dfs:
                logger.info("无有效模型汇总文件, 跳过多模型对比")
                return

            combined_df = pd.concat(all_model_dfs, ignore_index=True)
            # Keep only successfully-trained rows.
            combined_df = combined_df[combined_df["status"] == "success"].reset_index(drop=True)
            if combined_df.empty:
                logger.info("无成功训练的模型, 跳过多模型对比")
                return

            # ==============================================
            # 5. Pretty-print the comparison.
            # ==============================================
            logger.info("\n" + "="*120)
            logger.info("                          多模型指标对比汇总")
            logger.info("="*120)

            # 5.1 Per-model averages across labels.
            avg_metrics = combined_df.groupby("model_type").agg({
                "auc": "mean",
                "f1": "mean",
                "precision": "mean",
                "recall": "mean",
                "label_col": "count"  # number of successful labels
            }).rename(columns={"label_col": "有效标签数"}).round(4)
            logger.info("\n[各模型整体平均指标]")
            logger.info(avg_metrics.to_string())

            # 5.2 Per-label comparison across models.
            all_labels = sorted(combined_df["label_col"].unique())
            logger.info(f"\n\n[标签级指标对比(共{len(all_labels)}个标签)]")
            for label in all_labels:
                label_df = combined_df[combined_df["label_col"] == label][
                    ["model_type", "auc", "f1", "precision", "recall"]
                ].set_index("model_type").round(4)
                logger.info(f"\n标签: {label}")
                logger.info(label_df.to_string())

            # 5.3 Count of "strong" labels per model (AUC above threshold).
            strong_auc_threshold = 0.53
            strong_labels = combined_df[combined_df["auc"] >= strong_auc_threshold].groupby("model_type").size()
            logger.info(f"\n\n[强标签分布(AUC≥{strong_auc_threshold})]")
            logger.info(strong_labels.to_string())

            logger.info("\n" + "="*120 + "\n")

            # ==============================================
            # 6. Save the label-level comparison CSV (one row per
            #    label x model combination).
            # ==============================================
            final_save_df = combined_df[["label_col", "model_type", "auc", "f1", "precision", "recall"]].copy()
            final_save_df.rename(columns={
                "label_col": "标签名称",
                "model_type": "模型类型",
                "auc": "AUC",
                "f1": "F1分数",
                "precision": "精确率",
                "recall": "召回率"
            }, inplace=True)
            final_save_df = final_save_df.sort_values(["标签名称", "模型类型"]).reset_index(drop=True)

            multi_summary_path = osp.join(self.summary_dir, "multi_model_training_summary.csv")
            final_save_df.to_csv(multi_summary_path, index=False, encoding="utf-8-sig")  # utf-8-sig for Excel/Chinese
            logger.info(f"多模型指标对比汇总已保存至: {multi_summary_path}")

        except Exception as e:
            logger.error(f"多模型指标对比或保存失败: {str(e)}\n{traceback.format_exc()}")

# 各模型独立训练入口( 均使用CPU) 
def train_xgb() -> bool:
    """Train XGBoost models for every label (CPU); return overall success."""
    xgb_trainer = SSQMultiTrainer(model_type="XGBoost")
    return xgb_trainer.run()

def train_lgb() -> bool:
    """Train LightGBM models for every label (CPU); return overall success."""
    lgb_trainer = SSQMultiTrainer(model_type="LightGBM")
    return lgb_trainer.run()

def train_lstm() -> bool:
    """Train LSTM models for every label (GPU if available); return overall success."""
    lstm_trainer = SSQMultiTrainer(model_type="LSTM")
    return lstm_trainer.run()

def train_lr() -> bool:
    """Train logistic-regression models for every label (CPU); return overall success."""
    lr_trainer = SSQMultiTrainer(model_type="LogisticRegression")
    return lr_trainer.run()

def train_rf() -> bool:
    """Train random-forest models for every label (CPU); return overall success."""
    rf_trainer = SSQMultiTrainer(model_type="RandomForest")
    return rf_trainer.run()


def mutil_train():
    """Main training entry: run the selected models in parallel threads (CPU).

    Each enabled model gets its own ModelTrainThread; this function starts
    them all, waits for completion, and reports overall success.

    :returns: True when every model trained successfully, False otherwise
    """
    # Entry-point logging goes to main.log, separate from per-model logs.
    init_global_logger(log_file="main.log", pid=os.getpid())
    logger.info("===== 多模型训练系统启动( 强制使用CPU, 多线程并行)  =====")

    # GPU environment checks are skipped deliberately.
    logger.info("跳过GPU环境检查, 强制使用CPU训练")

    # Enable/disable models by (un)commenting entries below.
    model_steps = [
        ("XGBoost", train_xgb),
        # ("LightGBM", train_lgb),
        # ("LSTM", train_lstm),
        ("LogisticRegression", train_lr),
        # ("RandomForest", train_rf),
    ]
    try:
        # 1. One thread per enabled model.
        workers = []
        for name, func in model_steps:
            workers.append(ModelTrainThread(model_name=name, train_func=func))
            logger.info(f"创建{name}训练线程")

        # 2. Kick them all off.
        for worker in workers:
            worker.start()
            logger.info(f"启动{worker.model_name}训练线程(线程ID: {worker.ident})")

        # 3. Wait for every thread to finish.
        for worker in workers:
            worker.join()
            logger.info(f"{worker.model_name}训练线程执行结束(训练成功: {worker.success})")

        # 4. Report the aggregate outcome.
        if not all(w.success for w in workers):
            failed_models = [w.model_name for w in workers if not w.success]
            logger.critical(f"===== 训练失败的模型: {failed_models} =====")
            return False
        logger.info("===== 所有模型训练完成 =====")
        return True
    except Exception as e:
        logger.critical(f"模型训练异常终止: {str(e)},\n {traceback.format_exc()}")
        return False

if __name__ == "__main__":
    try:
        mutil_train()
    except Exception as e:
        # 主线程异常日志写入main.log
        logger.critical(f"主程序崩溃: {str(e)}\n{traceback.format_exc()}")
        exit(1)