from multiprocessing import Pool
import os
import json
import numpy as np
from typing import Optional, Dict, List, Tuple
import traceback
import pandas as pd
from datetime import datetime
from trainer.features.plus30_feature import SSQ30PlusFeatureEngineer
from trainer.features.fractal_mutil_calc import MultiWindowFractalEngine
from config.ssq_config import SSQ_CONFIG, DATA_FOLDER, init_global_logger, logger  # 基础日志

'''
2025.10.29 增强版: 支持中间过程文件保存
核心改进: 
1. 版本化管理中间文件, 避免覆盖
2. 分阶段保存所有关键步骤数据(分形特征、plus30特征、01数据、标签等)
3. 记录完整元数据(配置参数、数据统计、字段信息)
4. 支持重复运行和断点续用, 确保训练可复现
'''
class MergeFeatures:
    def __init__(self,run_type: str = None):
        """Set up paths, logging, split ratios and the intermediate-file layout.

        Args:
            run_type: "train" or "predict"; selects the data folders and
                whether the train/val/test save paths are real or placeholders.

        Raises:
            ValueError: if run_type is neither "train" nor "predict".
        """
        # Validate the run type before any path/logger setup.
        if run_type not in ["train", "predict"]:
            raise ValueError(f"数据类型错误！仅支持 'train' 或 'predict', 当前输入: {run_type}")
        self.run_type = run_type

        self.feat_df = None  # merged feature matrix (filled by do_features)
        self.fractal_feats = None  # fractal features (alignment baseline)
        self.plus30_feats = None  # plus30 features
        self.config_label_cols = SSQ_CONFIG['label_cols']
        self.df_01 = None  # 0/1-formatted draw data
        self.results: List[Dict] = []
        # One log file per run type so train/predict runs don't overwrite each other.
        log_filename = f"merge_features_{run_type}.log"
        init_global_logger(log_file=log_filename, pid=os.getpid())
        
        # Core configuration
        self.merge_key = "idx"  # draw-index key used to align all feature frames
        self.missing_strategy = SSQ_CONFIG.get("missing_strategy", "median")      # how feature-column NaNs are filled
        # Dataset split configuration
        self.train_ratio = SSQ_CONFIG['train_config'].get("train_ratio", 0.75)
        self.val_ratio = SSQ_CONFIG['train_config'].get("val_ratio", 0.15)
        self.test_ratio = SSQ_CONFIG['train_config'].get("test_ratio", 0.1)

        # Train and predict modes share the file-name definitions and differ
        # only in the root directory (avoids hard-coding names twice).
        
        self.data_folder = SSQ_CONFIG["train_file"]["DATA_FOLDER"]
        self.csv01_file = os.path.join(self.data_folder, SSQ_CONFIG['train_file']['csv01_file'])
        self.intermediate_root = os.path.join(self.data_folder, "intermediate_data")
        self.train_save_path = os.path.join(self.data_folder,SSQ_CONFIG['train_file']['train_data_path'])
        self.val_save_path = os.path.join(self.data_folder,SSQ_CONFIG['train_file']['val_data_path'])
        self.test_save_path = os.path.join(self.data_folder,SSQ_CONFIG['train_file']['test_data_path'])
        self.merged_save_path = os.path.join(self.data_folder,SSQ_CONFIG['train_file']['final_merge_features'])
        if self.run_type == "predict":
            self.data_folder = SSQ_CONFIG["predict_file"]["DATA_FOLDER"]
            self.csv01_file = os.path.join(self.data_folder, SSQ_CONFIG['predict_file']['csv01_file'])
            self.intermediate_root = os.path.join(self.data_folder, "intermediate_data")
            # Predict mode needs no train/val/test splits; only the merged
            # feature path matters (reuses the final_merge_features file name).
            self.train_save_path = os.path.join(self.data_folder, "predict_train.csv")  # placeholder, never written
            self.val_save_path = os.path.join(self.data_folder, "predict_val.csv")      # placeholder, never written
            self.test_save_path = os.path.join(self.data_folder, "predict_test.csv")    # placeholder, never written
            self.merged_save_path = os.path.join(self.data_folder, SSQ_CONFIG['train_file']['final_merge_features'])  # reuses the train-mode file name
        
        # Keep only the red-ball indicator columns r1-r33 from the 0/1 header.
        self.red_cols_01 = [
            col for col in SSQ_CONFIG['csv01_header'] 
            if col.startswith('r') and col[1:].isdigit()  
        ]
        self.label_cols = [f"{col}_next" for col in self.red_cols_01]  # label columns: r1_next-r33_next

        logger.info(f'数据集划分比例: train:{self.train_ratio}, val:{self.val_ratio}, test:{self.test_ratio}')
        logger.info(f'运行模式: {self.run_type}, 数据根目录: {self.data_folder}')
        logger.info(f'01数据文件路径: {self.csv01_file}')
        logger.info(f'合并特征保存路径: {self.merged_save_path}')
        logger.info(f'self.red_cols_01: {self.red_cols_01}')
        logger.info(f'self.label_cols: {self.label_cols}')

        # ---------------------- intermediate-file saving configuration ----------------------
        # Data version tag (from config) namespacing all intermediate dirs so
        # repeated runs never overwrite each other.
        self.data_version = SSQ_CONFIG['train_version']
        
        # Stage directories, numbered in pipeline order.
        self.intermediate_dirs = {
            "fractal": os.path.join(self.intermediate_root, self.data_version, "01_fractal_feats"),
            "plus30": os.path.join(self.intermediate_root, self.data_version, "02_plus30_feats"),
            "01_data": os.path.join(self.intermediate_root, self.data_version, "03_01_data"),
            "labels": os.path.join(self.intermediate_root, self.data_version, "04_labels"),
            "merged_raw": os.path.join(self.intermediate_root, self.data_version, "05_merged_raw"),
            "merged_processed": os.path.join(self.intermediate_root, self.data_version, "06_merged_processed"),
            "split_datasets": os.path.join(self.intermediate_root, self.data_version, "07_split_datasets"),
            "metadata": os.path.join(self.intermediate_root, self.data_version, "metadata")
        }
        # Create every directory (idempotent across reruns).
        for dir_path in self.intermediate_dirs.values():
            os.makedirs(dir_path, exist_ok=True)
            logger.info(f"中间文件目录准备完成: {dir_path}")
        
        # Process-wide metadata record, filled in stage by stage.
        self.metadata = {
            "version": self.data_version,
            "create_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
            "run_type": self.run_type,  # record the run type for reproducibility
            "config": {
                "merge_key": self.merge_key,
                "missing_strategy": self.missing_strategy,
                "train_ratio": self.train_ratio,
                "val_ratio": self.val_ratio,
                "test_ratio": self.test_ratio,
                "red_cols_count": len(self.red_cols_01),
                "label_cols_count": len(self.label_cols)
            },
            "data_stats": {}  # per-stage statistics are added dynamically
        }

    # ---------------------- 中间文件保存方法 ----------------------
    def _save_intermediate_metadata(self):
        """Write the accumulated process metadata (config + stage stats) to JSON."""
        try:
            out_path = os.path.join(self.intermediate_dirs["metadata"], "process_metadata.json")
            with open(out_path, "w", encoding="utf-8") as fh:
                json.dump(self.metadata, fh, indent=2, ensure_ascii=False)
            logger.info(f"元数据已保存至: {out_path}")
            return True
        except Exception as e:
            logger.error(f"保存元数据失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_fractal_intermediate(self, df: pd.DataFrame):
        """Save the fractal feature stage (CSV + .npy + column list) and record stats."""
        try:
            stage_dir = self.intermediate_dirs["fractal"]
            # CSV for human inspection, .npy for fast reloading.
            df.to_csv(os.path.join(stage_dir, "fractal_feats.csv"), index=False, encoding="utf-8")
            np.save(os.path.join(stage_dir, "fractal_feats.npy"), df.values)

            # Persist column order so downstream consumers can realign features.
            with open(os.path.join(stage_dir, "fractal_cols.json"), "w", encoding="utf-8") as fh:
                json.dump(df.columns.tolist(), fh, indent=2)

            # An empty frame (possible in predict mode) makes min()/max() NaN,
            # which cannot be cast to int — fall back to -1 sentinels.
            if df.empty:
                idx_lo = idx_hi = -1
            else:
                idx_lo = int(df[self.merge_key].min())
                idx_hi = int(df[self.merge_key].max())

            self.metadata["data_stats"]["fractal_feats"] = {
                "shape": df.shape,
                "idx_min": idx_lo,
                "idx_max": idx_hi,
                "feature_count": len(df.columns) - 1  # minus the idx key column
            }
            logger.info(f"分形特征中间文件已保存至: {self.intermediate_dirs['fractal']}")
            return True
        except Exception as e:
            logger.error(f"保存分形特征失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_plus30_intermediate(self, df: pd.DataFrame):
        """Save the plus30 feature stage (CSV + .npy + column list) and record stats."""
        try:
            stage_dir = self.intermediate_dirs["plus30"]
            df.to_csv(os.path.join(stage_dir, "plus30_feats.csv"), index=False, encoding="utf-8")
            np.save(os.path.join(stage_dir, "plus30_feats.npy"), df.values)
            with open(os.path.join(stage_dir, "plus30_cols.json"), "w", encoding="utf-8") as fh:
                json.dump(df.columns.tolist(), fh, indent=2)

            # An empty frame (possible in predict mode) makes min()/max() NaN,
            # which cannot be cast to int — fall back to -1 sentinels.
            if df.empty:
                idx_lo = idx_hi = -1
            else:
                idx_lo = int(df[self.merge_key].min())
                idx_hi = int(df[self.merge_key].max())

            self.metadata["data_stats"]["plus30_feats"] = {
                "shape": df.shape,
                "idx_min": idx_lo,
                "idx_max": idx_hi,
                "feature_count": len(df.columns) - 1  # minus the idx key column
            }
            logger.info(f"plus30特征中间文件已保存至: {self.intermediate_dirs['plus30']}")
            return True
        except Exception as e:
            logger.error(f"保存plus30特征失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_01data_intermediate(self, df: pd.DataFrame):
        """Save the 0/1-formatted draw-data stage (CSV + .npy + column list)."""
        try:
            stage_dir = self.intermediate_dirs["01_data"]
            df.to_csv(os.path.join(stage_dir, "01_formatted_data.csv"), index=False, encoding="utf-8")
            np.save(os.path.join(stage_dir, "01_formatted_data.npy"), df.values)
            with open(os.path.join(stage_dir, "01_cols.json"), "w", encoding="utf-8") as fh:
                json.dump(df.columns.tolist(), fh, indent=2)

            # An empty frame would yield NaN from min()/max(), which cannot be
            # cast to int — record -1 sentinels instead.
            if df.empty:
                idx_lo = idx_hi = -1
            else:
                idx_lo = int(df[self.merge_key].min())
                idx_hi = int(df[self.merge_key].max())

            self.metadata["data_stats"]["01_data"] = {
                "shape": df.shape,
                "idx_min": idx_lo,
                "idx_max": idx_hi,
                "red_cols_count": len(self.red_cols_01)
            }
            logger.info(f"01数据中间文件已保存至: {self.intermediate_dirs['01_data']}")
            return True
        except Exception as e:
            logger.error(f"保存01数据失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_labels_intermediate(self, df: pd.DataFrame):
        """Save the label stage (CSV + .npy + column list); handles the empty
        frame produced in predict mode."""
        try:
            stage_dir = self.intermediate_dirs["labels"]
            df.to_csv(os.path.join(stage_dir, "labels_next.csv"), index=False, encoding="utf-8")
            np.save(os.path.join(stage_dir, "labels_next.npy"), df.values)
            with open(os.path.join(stage_dir, "label_cols.json"), "w", encoding="utf-8") as fh:
                json.dump(df.columns.tolist(), fh, indent=2)

            # An empty label frame (predict mode) would yield NaN from
            # min()/max(), which cannot be cast to int — use -1 sentinels.
            if df.empty:
                idx_lo = idx_hi = -1
            else:
                idx_lo = int(df[self.merge_key].min())
                idx_hi = int(df[self.merge_key].max())

            self.metadata["data_stats"]["labels"] = {
                "shape": df.shape,
                "idx_min": idx_lo,
                "idx_max": idx_hi,
                "label_count": len(self.label_cols)
            }
            logger.info(f"标签中间文件已保存至: {self.intermediate_dirs['labels']}")
            return True
        except Exception as e:
            logger.error(f"保存标签数据失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_merged_raw_intermediate(self, df: pd.DataFrame):
        """Save the merged-but-unprocessed feature matrix plus a column map."""
        try:
            stage_dir = self.intermediate_dirs["merged_raw"]
            df.to_csv(os.path.join(stage_dir, "merged_raw_feats.csv"), index=False, encoding="utf-8")
            np.save(os.path.join(stage_dir, "merged_raw_feats.npy"), df.values)

            # Record which columns are the key / features / labels so later
            # stages can separate them without re-deriving the rule.
            non_feature = [self.merge_key] + self.label_cols
            cols_info = {
                "merge_key": self.merge_key,
                "feature_cols": [col for col in df.columns if col not in non_feature],
                "label_cols": self.label_cols
            }
            with open(os.path.join(stage_dir, "cols_info.json"), "w", encoding="utf-8") as fh:
                json.dump(cols_info, fh, indent=2)

            # An empty frame would yield NaN from min()/max(), which cannot be
            # cast to int — record -1 sentinels instead.
            if df.empty:
                idx_lo = idx_hi = -1
            else:
                idx_lo = int(df[self.merge_key].min())
                idx_hi = int(df[self.merge_key].max())

            self.metadata["data_stats"]["merged_raw"] = {
                "shape": df.shape,
                "idx_min": idx_lo,
                "idx_max": idx_hi,
                "feature_count": len(cols_info["feature_cols"]),
                "label_count": len(self.label_cols)
            }
            logger.info(f"原始合并特征已保存至: {self.intermediate_dirs['merged_raw']}")
            return True
        except Exception as e:
            logger.error(f"保存原始合并特征失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_merged_processed_intermediate(self, df: pd.DataFrame):
        """Save the merged feature matrix after missing-value handling.

        Writes CSV + .npy copies plus a JSON report of the columns that still
        contain missing values, and records stage statistics in the metadata.

        Args:
            df: merged feature frame (post missing-value processing).

        Returns:
            True on success, False if any step failed (error is logged).
        """
        try:
            stage_dir = self.intermediate_dirs["merged_processed"]
            df.to_csv(os.path.join(stage_dir, "merged_processed_feats.csv"), index=False, encoding="utf-8")
            np.save(os.path.join(stage_dir, "merged_processed_feats.npy"), df.values)

            # Missing-value report. Bug fix: pandas' isnull().sum() yields
            # numpy.int64 counts, which json.dump cannot serialize (TypeError),
            # so each count is cast to a plain Python int. The same fix keeps
            # "missing_count" below serializable when the metadata file is
            # dumped later (previously sum(missing_stats > 0) stored a
            # numpy.int64 and broke that dump).
            missing_stats = df.isnull().sum()
            still_missing = {col: int(cnt) for col, cnt in missing_stats.items() if cnt > 0}
            missing_stats_path = os.path.join(stage_dir, "missing_stats.json")
            with open(missing_stats_path, "w", encoding="utf-8") as f:
                json.dump(still_missing, f, indent=2)

            # An empty frame (possible in predict mode) would make min()/max()
            # NaN, which cannot be cast to int — record -1 sentinels instead.
            if not df.empty:
                idx_min = int(df[self.merge_key].min())
                idx_max = int(df[self.merge_key].max())
            else:
                idx_min = -1
                idx_max = -1

            self.metadata["data_stats"]["merged_processed"] = {
                "shape": df.shape,
                "idx_min": idx_min,
                "idx_max": idx_max,
                "missing_count": len(still_missing),  # columns with remaining NaNs
                "missing_strategy": self.missing_strategy
            }
            logger.info(f"处理后合并特征已保存至: {self.intermediate_dirs['merged_processed']}")
            return True
        except Exception as e:
            logger.error(f"保存处理后合并特征失败: {str(e)}\n{traceback.format_exc()}")
            return False

    def _save_split_datasets_intermediate(self, train_df: pd.DataFrame, val_df: pd.DataFrame, test_df: pd.DataFrame):
        """Save the train/val/test partitions plus a split_info.json summary."""
        try:
            stage_dir = self.intermediate_dirs["split_datasets"]
            # Persist each partition as CSV (readable) and .npy (fast reload).
            for stem, part in (("train_set", train_df), ("val_set", val_df), ("test_set", test_df)):
                part.to_csv(os.path.join(stage_dir, f"{stem}.csv"), index=False, encoding="utf-8")
                np.save(os.path.join(stage_dir, f"{stem}.npy"), part.values)

            def _idx_range(part: pd.DataFrame) -> list:
                # [-1, -1] sentinel for empty partitions (min/max would be NaN).
                if part.empty:
                    return [-1, -1]
                return [int(part[self.merge_key].min()), int(part[self.merge_key].max())]

            split_info = {
                "train_shape": train_df.shape,
                "val_shape": val_df.shape,
                "test_shape": test_df.shape,
                "train_idx_range": _idx_range(train_df),
                "val_idx_range": _idx_range(val_df),
                "test_idx_range": _idx_range(test_df),
                "split_ratios": {
                    "train": self.train_ratio,
                    "val": self.val_ratio,
                    "test": self.test_ratio
                }
            }
            with open(os.path.join(stage_dir, "split_info.json"), "w", encoding="utf-8") as fh:
                json.dump(split_info, fh, indent=2)

            self.metadata["data_stats"]["split_datasets"] = split_info
            logger.info(f"拆分数据集已保存至: {self.intermediate_dirs['split_datasets']}")
            return True
        except Exception as e:
            logger.error(f"保存拆分数据集失败: {str(e)}\n{traceback.format_exc()}")
            return False

    # ---------------------- 核心特征生成与合并方法 ----------------------
    def _generate_fractal_feats(self) -> Optional[pd.DataFrame]:
        """Generate the fractal feature set, the baseline other stages align to.

        Returns:
            The de-duplicated, idx-sorted fractal feature frame, or None on
            any failure (logged).
        """
        try:
            logger.info("开始生成分形特征(基准特征)...")
            engine = MultiWindowFractalEngine(run_type=self.run_type)

            if not engine.do_fractal_features():
                logger.error("分形特征计算流程执行失败")
                return None

            # Pull the computed features off the engine and validate them.
            feats = engine.final_fractal_feat_df
            if feats is None or feats.empty:
                logger.error("分形特征生成结果为空")
                return None
            if self.merge_key not in feats.columns:
                logger.error(f"分形特征缺少合并主键[{self.merge_key}]")
                return None

            # De-duplicate on the merge key (keep first) and sort by idx.
            feats = (
                feats.drop_duplicates(subset=[self.merge_key], keep="first")
                     .sort_values(self.merge_key)
            )
            self.fractal_feats = feats

            # Snapshot this stage for reproducibility.
            self._save_fractal_intermediate(feats)

            lo = feats[self.merge_key].min() if not feats.empty else -1
            hi = feats[self.merge_key].max() if not feats.empty else -1
            logger.info(f"分形特征生成完成, 形状: {feats.shape}, idx范围: [{lo}, {hi}]")
            return feats
        except Exception as e:
            logger.error(f"分形特征生成失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _generate_plus30_feats(self) -> Optional[pd.DataFrame]:
        """Generate plus30 features and align them to the fractal idx set.

        Requires _generate_fractal_feats() to have run first.

        Returns:
            The aligned plus30 feature frame, or None on failure (logged).
        """
        try:
            if self.fractal_feats is None:
                logger.error("请先生成分形特征(需以其为基准)")
                return None

            logger.info("开始生成plus30特征...")
            engineer = SSQ30PlusFeatureEngineer(run_type=self.run_type)
            if not engineer.do_30plus_features():
                logger.error("plus30特征工程执行失败")
                return None

            feats = engineer.final_feat_df
            if feats is None or feats.empty:
                logger.error("plus30特征生成结果为空")
                return None

            # Drop any label columns, then keep only rows whose idx appears in
            # the fractal baseline, de-duplicated and sorted by idx.
            baseline_idx = set(self.fractal_feats[self.merge_key])
            feats = feats.drop(columns=self.config_label_cols, errors="ignore")
            feats = feats[feats[self.merge_key].isin(baseline_idx)].copy()
            feats = feats.drop_duplicates(subset=[self.merge_key], keep="first").sort_values(self.merge_key)

            self.plus30_feats = feats

            # Snapshot this stage for reproducibility.
            self._save_plus30_intermediate(feats)

            logger.info(f"plus30特征对齐完成, 形状: {feats.shape}, 有效idx数量: {len(feats)}")
            return feats
        except Exception as e:
            logger.error(f"plus30特征生成失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _load_01_data(self) -> Optional[pd.DataFrame]:
        """Load the 0/1-formatted draw data and align it to the fractal idx set.

        Reads idx + r1..r33 from ``self.csv01_file``, keeps only rows whose
        idx exists in the fractal baseline, and — in predict mode only —
        fabricates all-zero placeholder rows when the file has no rows for
        those idx (the to-be-predicted draw is unknown).

        Returns:
            The aligned DataFrame, or None on failure. Train mode treats an
            empty aligned result as failure; predict mode does not.

        Raises:
            Nothing propagates: a missing file raises FileNotFoundError which
            is caught by the blanket handler and logged, returning None.
        """
        try:
            if self.fractal_feats is None:
                logger.error("请先生成分形特征(需以其为基准)")
                return None
            
            logger.info("加载01格式化数据(r1-r33)...")
            if not os.path.exists(self.csv01_file):
                raise FileNotFoundError(f"01格式化数据文件不存在: {self.csv01_file}")
            
            # Load only the merge key plus the 33 red-ball indicator columns.
            df_01 = pd.read_csv(
                self.csv01_file,
                usecols=[self.merge_key] + self.red_cols_01,
                encoding="utf-8"
            )
            df_01[self.merge_key] = df_01[self.merge_key].astype(int)
            # NOTE(review): regex-replaces non-digit content with 0 before the
            # int cast — presumably to sanitize string-typed cells; on columns
            # pandas already parsed as numeric this replace is a no-op. Confirm
            # the CSV's actual cell contents.
            df_01[self.red_cols_01] = df_01[self.red_cols_01].replace(r'[^0-9]', 0, regex=True).astype(int)
            
            # Align to the fractal baseline: keep matching idx, de-dup, sort.
            fractal_idx = set(self.fractal_feats[self.merge_key])
            df_01 = df_01[df_01[self.merge_key].isin(fractal_idx)].copy()
            df_01 = df_01.drop_duplicates(subset=[self.merge_key], keep="first").sort_values(self.merge_key)
            
            # ---------------------- predict-mode placeholder rows ----------------------
            # When predicting, the upcoming draw is not in the 0/1 file yet, so
            # the aligned frame can be empty even though the fractal baseline
            # has idx values — fabricate all-zero rows for those idx.
            if self.run_type == "predict" and df_01.empty and len(fractal_idx) > 0:
                logger.warning(f"预测模式: 01数据中无分形特征的idx({fractal_idx}), 生成全0占位数据")
                # One placeholder row per baseline idx.
                predict_idx_list = list(fractal_idx)
                # Build the all-zero rows.
                zero_data_list = []
                for idx in predict_idx_list:
                    zero_data = {self.merge_key: idx}
                    # Red-ball columns are unknown for the future draw → 0.
                    zero_data.update({col: 0 for col in self.red_cols_01})
                    zero_data_list.append(zero_data)
                # Materialize as a DataFrame.
                df_01 = pd.DataFrame(zero_data_list)
                logger.info(f"生成预测期01占位数据: {len(df_01)}期, idx: {predict_idx_list}")
            
            self.df_01 = df_01
            
            # Snapshot this stage (after any placeholder generation).
            self._save_01data_intermediate(df_01)
            
            logger.info(f"01数据对齐完成, 形状: {df_01.shape}, 有效idx数量: {len(df_01)}")
            
            # ---------------------- failure criteria ----------------------
            # Train mode: empty data is a hard failure. Predict mode: succeed
            # even if the original data was empty (placeholders were generated).
            if self.run_type == "train" and df_01.empty:
                logger.error("训练模式: 01数据对齐后为空, 加载失败")
                return None
            
            return df_01
        except Exception as e:
            logger.error(f"01数据加载失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _build_labels(self) -> Optional[pd.DataFrame]:
        """Build the next-draw label columns r1_next..r33_next.

        Predict mode returns an empty frame that still carries the full label
        schema (the next draw is unknown); train mode shifts each row's red
        balls up by one period.

        Returns:
            The label frame (possibly empty), or None on failure (logged).
        """
        try:
            if self.run_type == "predict":
                # No ground-truth labels exist for the draw being predicted.
                logger.warning("预测模式: 待预测期下一期红球未知，跳过真实标签构造，返回空标签DataFrame")
                schema = {self.merge_key: []}
                schema.update({col: [] for col in self.label_cols})
                placeholder = pd.DataFrame(schema)
                # Save the (empty) stage file to keep the pipeline uniform.
                self._save_labels_intermediate(placeholder)
                return placeholder

            if self.df_01 is None:
                logger.error("请先加载01数据")
                return None

            logger.info("构造标签列(r1_next-r33_next)...")
            ordered = self.df_01.sort_values(self.merge_key).copy()

            # Each row's label is the following draw's 0/1 vector.
            shifted = ordered[self.red_cols_01].shift(-1).copy()
            shifted.columns = self.label_cols

            # Join idx with the shifted labels and drop the trailing row whose
            # "next draw" does not exist.
            label_df = pd.concat([ordered[[self.merge_key]], shifted], axis=1)
            label_df = label_df.dropna(subset=self.label_cols).copy()

            self._save_labels_intermediate(label_df)

            logger.info(f"标签构造完成, 形状: {label_df.shape}, 有效标签idx数量: {len(label_df)}")
            return label_df
        except Exception as e:
            logger.error(f"标签构造失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _add_feature_prefix(self, df: pd.DataFrame, prefix: str) -> pd.DataFrame:
        """为特征列添加前缀(避免合并冲突)"""
        key_col = df[[self.merge_key]]
        feature_cols = df.drop(columns=[self.merge_key])
        feature_cols = feature_cols.add_prefix(f"{prefix}_")
        return pd.concat([key_col, feature_cols], axis=1)

    def _merge_all_feats(self) -> Optional[pd.DataFrame]:
        """Merge fractal + plus30 features, 0/1 data and labels into one frame.

        All joins are inner joins on ``self.merge_key``. In predict mode the
        labels are all-zero placeholders so the column layout matches training.

        Returns:
            The merged DataFrame, or None if label construction failed or an
            exception occurred (logged).
        """
        try:
            # Prefix fractal feature columns to avoid name collisions on merge.
            fractal_with_prefix = self._add_feature_prefix(self.fractal_feats, "fractal")
            logger.info(f'分形特征带前缀后形状: {fractal_with_prefix.shape}')
            
            # Prefix plus30 feature columns (dropping its date column, if any).
            plus30_with_prefix = self._add_feature_prefix(self.plus30_feats.drop(columns=["date"], errors="ignore"), "plus30")
            logger.info(f'plus30特征带前缀后形状: {plus30_with_prefix.shape}')

            # Inner-join fractal and plus30 features on the idx key.
            merged = pd.merge(fractal_with_prefix, plus30_with_prefix, on=self.merge_key, how="inner")
            
            # Join the 0/1-formatted draw data.
            merged = pd.merge(merged, self.df_01, on=self.merge_key, how="inner")
            
            # Attach labels (real in train mode, empty frame in predict mode).
            label_df = self._build_labels()
            if label_df is None:
                logger.error("标签数据为空, 合并终止")
                return None
            
            # Predict mode: no real labels exist, so add all-zero placeholder
            # label columns to keep the schema identical to training.
            if self.run_type == "predict" and label_df.empty:
                logger.warning("预测模式: 为特征矩阵添加全0占位标签列（不影响模型预测）")
                for col in self.label_cols:
                    merged[col] = 0  # placeholder zeros
            else:
                # Train mode: inner-join the real labels.
                merged = pd.merge(merged, label_df, on=self.merge_key, how="inner")

            # Drop any remaining date-like columns (not model features).
            date_cols = [col for col in merged.columns if 'date' in col.lower()]
            if date_cols:
                merged = merged.drop(columns=date_cols)
                logger.info(f"删除无关日期列: {date_cols}")
            
            # Snapshot the raw (pre missing-value handling) merge result.
            self._save_merged_raw_intermediate(merged)
            
            logger.info(f"所有特征合并完成, 最终形状: {merged.shape}, 包含{len(merged.columns)}列(含标签)")
            return merged
        except Exception as e:
            logger.error(f"特征合并失败: {str(e)}\n{traceback.format_exc()}")
            return None

    def _handle_missing_values(self, df: pd.DataFrame) -> pd.DataFrame:
        """Fill feature-column NaNs per ``self.missing_strategy`` and, in train
        mode only, drop rows with missing labels.

        Args:
            df: merged feature frame; feature columns are filled in place.

        Returns:
            The processed DataFrame (label-missing rows removed in train mode).
        """
        logger.info(f"开始处理缺失值(策略: {self.missing_strategy})...")
        # Feature columns are everything except the merge key and the labels.
        feature_cols = [col for col in df.columns if col not in [self.merge_key] + self.label_cols]
        # NOTE(review): only int64/float64 columns are selected — other numeric
        # dtypes (e.g. int32) would be skipped; confirm upstream dtypes.
        numeric_feats = df[feature_cols].select_dtypes(include=["int64", "float64"]).columns
        
        # Fill feature NaNs according to the configured strategy.
        if self.missing_strategy == "mean":
            df[numeric_feats] = df[numeric_feats].fillna(df[numeric_feats].mean())
        elif self.missing_strategy == "median":
            df[numeric_feats] = df[numeric_feats].fillna(df[numeric_feats].median())
        elif self.missing_strategy == "zero":
            df[numeric_feats] = df[numeric_feats].fillna(0)
        else:
            logger.warning(f"未支持的缺失值策略[{self.missing_strategy}], 不处理特征缺失值")
        
        # Drop rows with missing labels (train mode only; predict-mode labels
        # are placeholder zeros and never missing).
        if self.run_type == "train":
            label_missing = df[self.label_cols].isnull().any(axis=1).sum()
            if label_missing > 0:
                logger.warning(f"标签列存在{label_missing}条缺失值, 已自动过滤")
                df = df.dropna(subset=self.label_cols)
        
        # Snapshot the post-processing result as an intermediate file.
        self._save_merged_processed_intermediate(df)
        
        return df

    def _split_dataset(self, df: pd.DataFrame) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
        """Chronologically split *df* into train/val/test frames.

        Predict mode skips splitting and returns (df, empty, empty). The test
        partition receives whatever remains after the train and val cuts.

        Returns:
            (train_df, val_df, test_df)

        Raises:
            Re-raises any exception after logging it (train mode only).
        """
        try:
            if self.run_type == "predict":
                logger.warning("预测模式: 无需拆分数据集，返回全量数据作为预测集")
                return df, pd.DataFrame(), pd.DataFrame()

            logger.info("开始时序拆分数据集...")

            # Sort by idx ascending so no future draw leaks into earlier splits.
            ordered = df.sort_values(self.merge_key).reset_index(drop=True)
            n_total = len(ordered)
            logger.info(f"待拆分总样本数: {n_total}")

            # Cut points; the test set takes the remainder.
            train_end = int(n_total * self.train_ratio)
            val_end = train_end + int(n_total * self.val_ratio)

            train_df = ordered.iloc[:train_end].copy()
            val_df = ordered.iloc[train_end:val_end].copy()
            test_df = ordered.iloc[val_end:].copy()

            logger.info(f"拆分完成:")
            logger.info(f"  训练集: {len(train_df)}样本 (占比: {len(train_df)/n_total:.2%})")
            logger.info(f"  验证集: {len(val_df)}样本 (占比: {len(val_df)/n_total:.2%})")
            logger.info(f"  测试集: {len(test_df)}样本 (占比: {len(test_df)/n_total:.2%})")

            # Snapshot the partitions for reproducibility.
            self._save_split_datasets_intermediate(train_df, val_df, test_df)

            return train_df, val_df, test_df
        except Exception as e:
            logger.error(f"数据集拆分失败: {str(e)}\n{traceback.format_exc()}")
            raise

    def _save_split_datasets(self, train_df: pd.DataFrame, val_df: pd.DataFrame, test_df: pd.DataFrame) -> bool:
        """Write the final train/val/test CSVs (predict mode: one prediction file).

        Returns:
            True on success (or nothing to write), False on failure (logged).
        """
        try:
            if self.run_type == "predict":
                # In predict mode train_df holds the full prediction set; the
                # val/test frames are empty placeholders and are not written.
                if not train_df.empty:
                    train_df.to_csv(self.merged_save_path, index=False, encoding="utf-8")
                    logger.info(f"预测模式: 预测特征已保存至: {self.merged_save_path}")
                return True

            # Train mode: write each partition to its configured path.
            for part, path in ((train_df, self.train_save_path),
                               (val_df, self.val_save_path),
                               (test_df, self.test_save_path)):
                part.to_csv(path, index=False, encoding="utf-8")
            logger.info(f"最终训练集/验证集/测试集已保存至配置路径")
            return True
        except Exception as e:
            logger.error(f"保存最终拆分数据集失败: {str(e)}\n{traceback.format_exc()}")
            return False
   
    def _save_merged_feats(self, df: pd.DataFrame) -> bool:
        """Persist the complete merged feature matrix as a single CSV.

        Returns:
            True on success, False on failure (logged).
        """
        try:
            target = self.merged_save_path
            df.to_csv(target, index=False, encoding="utf-8")
            logger.info(f"完整合并特征已保存至: {target}")
            return True
        except Exception as e:
            logger.error(f"保存合并特征失败: {str(e)}\n{traceback.format_exc()}")
            return False
   
    def do_features(self) -> bool:
        """Run the full pipeline: generate features → merge → fill missing
        values → split → save (train/predict aware).

        Returns:
            True when every stage succeeded, False otherwise. Failures are
            logged; critical ones abort the pipeline early.
        """
        try:
            logger.info("="*80)
            logger.info(f"开始执行特征融合全流程（运行模式：{self.run_type}）")
            logger.info("="*80)
            
            # 1. Fractal features (the alignment baseline)
            if self._generate_fractal_feats() is None:
                logger.critical("分形特征生成失败, 流程终止!")
                return False
            
            # 2. plus30 features, aligned to the fractal idx set
            plus30_feats = self._generate_plus30_feats()
            if plus30_feats is None or plus30_feats.empty:
                logger.critical("30+特征生成失败!")
                return False
            
            # 3. 0/1 draw data, aligned to the fractal idx set
            load_01_data = self._load_01_data()
            if load_01_data is None:
                logger.critical(f"加载01数据失败!")
                return False
            elif load_01_data.empty:
                # Empty data aborts only in train mode; predict mode continues
                # on the placeholder rows generated by _load_01_data.
                if self.run_type == "train":
                    logger.critical("训练模式: 01数据为空, 程序终止!")
                    return False
                else:
                    logger.warning("预测模式: 01数据为空, 但已生成占位数据, 继续执行")

            
            # 4. Merge all features and labels
            merged_feats = self._merge_all_feats()
            if merged_feats is None or merged_feats.empty:
                logger.critical(f"合并特征失败!")
                return False
            
            # 5. Handle missing values
            logger.info("处理缺失值...")
            merged_feats = self._handle_missing_values(merged_feats)
            logger.info(f"缺失值处理完成, 剩余样本数: {len(merged_feats)}")
            
            # 6. Save the complete merged feature file (non-fatal on failure)
            if not self._save_merged_feats(merged_feats):
                logger.warning(f"保存完整合并特征失败, 继续执行后续步骤")
            
            # 7. Split into train/val/test (predict mode: no split)
            train_df, val_df, test_df = self._split_dataset(merged_feats)
            
            # 8. Save the final split datasets
            if not self._save_split_datasets(train_df, val_df, test_df):
                logger.critical(f"保存最终数据集失败!")
                return False
            
            # 9. Save the full metadata record
            self._save_intermediate_metadata()
            
            self.feat_df = merged_feats
            logger.info("="*80)
            logger.info(f"特征融合+数据集拆分+中间文件保存全流程完成!（运行模式：{self.run_type}）")
            logger.info(f"中间文件版本: {self.data_version}, 根目录: {self.intermediate_root}")
            logger.info(f"最终特征保存路径: {self.merged_save_path}")
            logger.info("="*80)
            return True
            
        except Exception as e:
            logger.critical(f"总流程失败: {str(e)}\n{traceback.format_exc()}")
            return False


if __name__ == "__main__":
    import sys
    # CLI usage: python merge_features.py train  /  python merge_features.py predict
    if len(sys.argv) != 2:
        print("用法：python merge_features.py [train|predict]")
        sys.exit(1)
    run_type = sys.argv[1]
    try:
        merger = MergeFeatures(run_type=run_type)
        if merger.do_features():
            logger.info(f"{run_type}模式特征工程全流程成功完成!")
            sys.exit(0)
        else:
            logger.error(f"{run_type}模式流程执行失败")
            sys.exit(1)
    except Exception as e:
        # sys.exit replaces the site-module builtin exit(), which is not
        # guaranteed to exist in non-interactive / frozen environments.
        logger.critical(f"流程异常终止: {str(e)}\n{traceback.format_exc()}")
        sys.exit(-1)