# -*- coding:utf-8 -*-
"""
双色球30+维度特征工程核心类(多窗口多进程版)- Ubuntu适配版
移除Windows系统特有代码, 优化Linux环境下多进程性能
"""
import pandas as pd
import numpy as np
import re
import multiprocessing
from scipy.stats import entropy
from collections import defaultdict
import os
import traceback
from config.ssq_config import init_global_logger, logger, SSQ_CONFIG, FEATURE_ENGINEER, DATA_FOLDER
import lightgbm as lgb
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import joblib

# 全局变量: 用于进程间共享配置(避免重复加载)
# Global shared config for worker processes: built once at import time so
# forked children inherit it instead of re-deriving it from SSQ_CONFIG.
GLOBAL_CONFIG = {
    # All red-ball numbers 1..max_red_ball
    "red_balls": list(range(1, SSQ_CONFIG['max_red_ball']+1)),
    # Zone partition of the red balls: zone 1 = 1-11, zone 2 = 12-22, zone 3 = 23-33
    "zone_map": {1: range(1, 12), 2: range(12, 23), 3: range(23, 34)}
}


class SSQ30PlusFeatureEngineer:
    def __init__(self, run_type, predict_issue=None):  # predict_issue defaults to None (train mode)
        """Initialize the feature-engineering pipeline and resolve all I/O paths.

        Args:
            run_type: "train" or "predict"; selects which config section is used.
            predict_issue: target issue number in predict mode; None when training.

        Raises:
            ValueError: if run_type is neither "train" nor "predict".
        """
        # Validate the run mode early.
        if run_type not in ["train", "predict"]:
            raise ValueError(f"数据类型错误！仅支持 'train' 或 'predict', 当前输入: {run_type}")
        self.run_type = run_type

        # Always set predict_issue so the attribute exists in both modes
        # (train passes None; predict passes the real issue number).
        self.predict_issue = predict_issue

        # Core rolling-window sizes (per config; comments elsewhere say 13/34/55).
        self.windows = SSQ_CONFIG['feature_window_size']  # the three core windows
        
        # Feature-engineering config and the r1-r33 column names of the 0/1 matrix.
        self.feat_config = FEATURE_ENGINEER
        self.red_cols_01 = [
            col for col in SSQ_CONFIG['csv01_header'] 
            if col.startswith('r') and col[1:].isdigit()  # r1-r33
        ]

        # Data holders populated by the load_* methods.
        self.raw_df = None  # raw draw history
        self.df_01 = None   # 0/1-encoded draw matrix
        self.fractal_df = None  # fractal feature data
        self.final_feat_df = None  # final feature matrix
        self.label_df = None  # labels (train mode only)
        self.min_valid_issue = None  # minimum valid issue (anchored on fractal features)

        # Paths default to the train-file config; predict mode overrides most of them below.
        self.data_folder = SSQ_CONFIG['train_file']['DATA_FOLDER']
        self.feat_cache_folder = SSQ_CONFIG['train_file']['model_save_dir']

        self.raw_path = os.path.join(self.data_folder, SSQ_CONFIG['train_file']['csv_file'])
        self.df01_path = os.path.join(self.data_folder,SSQ_CONFIG["train_file"]["csv01_file"])
        self.fractal_path = os.path.join(self.data_folder,SSQ_CONFIG["train_file"]["full_fractal_feature_file"])
        self.full_plus30_features = os.path.join(self.data_folder,SSQ_CONFIG["train_file"]["full_plus30_features"])
        self.plus30_folder = os.path.join(self.data_folder,SSQ_CONFIG["train_file"]["plus30_folder"])

        # PCA/scaler artifact paths are built from the TRAIN cache folder on purpose,
        # BEFORE the predict-mode override below, so prediction reuses the feature
        # selection rules and PCA model fitted at training time.
        self.pca_pkl = os.path.join(self.feat_cache_folder,'train_pca.pkl')
        self.scaler_pkl = os.path.join(self.feat_cache_folder,'train_scaler.pkl')
        self.pca_dim_pkl = os.path.join(self.feat_cache_folder,'train_pca_dim.pkl')
        self.train_selected_features = os.path.join(self.feat_cache_folder,'train_selected_features.pkl')
        if self.run_type == "predict":
            self.data_folder = SSQ_CONFIG['predict_file']['DATA_FOLDER']
            self.feat_cache_folder = SSQ_CONFIG['predict_file']['model_save_dir']

            self.raw_path = os.path.join(self.data_folder, SSQ_CONFIG['predict_file']['csv_file'])
            self.df01_path = os.path.join(self.data_folder,SSQ_CONFIG["predict_file"]["csv01_file"])
            self.fractal_path = os.path.join(self.data_folder,SSQ_CONFIG["predict_file"]["full_fractal_feature_file"])
            self.full_plus30_features = os.path.join(self.data_folder,SSQ_CONFIG["predict_file"]["full_plus30_features"])
            self.plus30_folder = os.path.join(self.data_folder,SSQ_CONFIG["predict_file"]["plus30_folder"])

        for tmp_dir in [self.data_folder,self.plus30_folder,self.feat_cache_folder]:
            os.makedirs(tmp_dir, exist_ok=True)  # ensure every target directory exists

        # Feature-selection / dimensionality-reduction knobs.
        self.feat_importance_threshold = 0.001  # minimum importance for keeping a feature
        self.pca_n_components = 60              # PCA target dimensionality
        self.correlation_threshold = 0.8        # drop one of each highly-correlated feature pair
        self.merge_key = "idx"                  # join key across feature frames

        init_global_logger(log_file=f"feature_engineer_{self.run_type}.log", pid=os.getpid())

    def load_basic_data(self):
        """Load the base datasets: raw draw history plus the 0/1-encoded matrix.

        Populates self.raw_df and self.df_01; in predict mode also derives
        self.predict_issue (latest issue + 1), in train mode builds labels.
        Returns True on success, False on any failure (which is logged).
        """
        logger.info(f'加载基础数据(原始开奖数据 + 01格式化数据) - 模式：{self.run_type}')
        try:
            # --- raw draw history ---
            if not os.path.exists(self.raw_path):
                raise FileNotFoundError(f"原始开奖数据文件不存在: {self.raw_path}")
            raw = pd.read_csv(
                self.raw_path,
                usecols=["idx", "date", "r1", "r2", "r3", "r4", "r5", "r6", "b1"],
                encoding="utf-8",
            )
            raw = raw.sort_values("idx").reset_index(drop=True)
            raw["idx"] = raw["idx"].astype(int)
            raw["red_list"] = raw[["r1", "r2", "r3", "r4", "r5", "r6"]].values.tolist()
            self.raw_df = raw

            # Predict mode: the target issue is one past the latest historical draw.
            if self.run_type == "predict":
                self.predict_issue = self.raw_df["idx"].max() + 1
                logger.info(f"预测模式 - 历史数据共{len(self.raw_df)}期，待预测期号：{self.predict_issue}")
            else:
                logger.info(f"训练模式 - 原始开奖数据加载完成, 共{len(self.raw_df)}期, idx范围: [{self.raw_df['idx'].min()}, {self.raw_df['idx'].max()}]")

            # --- 0/1-encoded matrix ---
            if not os.path.exists(self.df01_path):
                raise FileNotFoundError(f"01格式化数据文件不存在: {self.df01_path}")

            wanted_cols = ["idx"] + self.red_cols_01  # 'idx' plus r1-r33
            encoded = pd.read_csv(self.df01_path, usecols=wanted_cols, encoding="utf-8")
            encoded = encoded.sort_values("idx").reset_index(drop=True)

            # Coerce r1-r33 to ints, mapping any non-numeric residue to 0.
            encoded[self.red_cols_01] = (
                encoded[self.red_cols_01]
                .replace(to_replace=r'[^0-9]', value=0, regex=True)
                .astype(int)
            )
            encoded["idx"] = encoded["idx"].astype(int)
            self.df_01 = encoded

            # Sanity check: every expected 0/1 column must be present.
            missing_cols = [col for col in self.red_cols_01 if col not in self.df_01.columns]
            if missing_cols:
                raise ValueError(f"01数据缺少必要列: {missing_cols}")

            logger.info(f"01格式化数据加载完成, 共{len(self.df_01)}期, idx范围: [{self.df_01['idx'].min()}, {self.df_01['idx'].max()}]")

            # Labels are only meaningful when training.
            if self.run_type == "train":
                self._build_label()
            else:
                # Predict mode keeps label_df as None so downstream alignment is skipped.
                self.label_df = None
                logger.info("预测模式 - 无需构建训练标签")

            return True
        except Exception as e:
            logger.error(f"加载基础数据失败: {str(e)}{traceback.format_exc()}")
            return False
        
    def merge_01_features(self):
        """Deduplicate and align the 0/1 data against the final feature matrix.

        NOTE(review): despite the name and the final log line, no 0/1 columns are
        actually concatenated onto final_feat_df here -- df_01_filtered is built,
        deduplicated and logged, then discarded at return. Confirm whether a real
        column merge was intended.

        Returns True on success, False on validation failure.
        """
        if self.final_feat_df is None:
            logger.error("最终特征矩阵为空, 无法合并01列")
            return False
        
        if self.df_01 is None or not set(self.red_cols_01).issubset(self.df_01.columns):
            logger.error("01数据未正确加载, 无法合并")
            return False
        
        # Deduplicate final_feat_df by issue number (keep first occurrence).
        final_duplicates = self.final_feat_df[self.final_feat_df["idx"].duplicated(keep=False)]
        if not final_duplicates.empty:
            dup_idx_final = final_duplicates["idx"].unique()
            logger.warning(f"final_feat_df中存在重复idx: {dup_idx_final}, 将保留首次出现的记录")
            self.final_feat_df = self.final_feat_df.drop_duplicates(subset=["idx"], keep="first").reset_index(drop=True)
        
        # Restrict the 0/1 data to issues present in the feature matrix.
        valid_idx = self.final_feat_df["idx"].unique()
        df_01_filtered = self.df_01[self.df_01["idx"].isin(valid_idx)].copy()
        
        df01_duplicates = df_01_filtered[df_01_filtered["idx"].duplicated(keep=False)]
        if not df01_duplicates.empty:
            dup_idx_01 = df01_duplicates["idx"].unique()
            logger.warning(f"df_01_filtered中存在重复idx: {dup_idx_01}, 将保留首次出现的记录")
            df_01_filtered = df_01_filtered.drop_duplicates(subset=["idx"], keep="first").reset_index(drop=True)
        
        logger.info(f"01数据过滤后保留{len(df_01_filtered)}期(与分形特征idx对齐)")
        
        # Abort if duplicates somehow remain on either side.
        if self.final_feat_df["idx"].duplicated().any() or df_01_filtered["idx"].duplicated().any():
            logger.error("合并前存在重复idx, 合并终止")
            return False
        
        # Predict mode: keep only the to-be-predicted issue's 0/1 row (if present).
        if self.run_type == "predict":
            df_01_filtered = df_01_filtered[df_01_filtered["idx"] == self.predict_issue].copy()
            logger.info(f"预测模式 - 仅保留待预测期{self.predict_issue}的01列数据")

        logger.info(f"01列合并完成, 最终特征矩阵形状: {self.final_feat_df.shape}")
        return True
    
    def build_label_columns(self):
        """Derive next-draw label columns (r1_next..r33_next) from the 0/1 matrix.

        Train mode: labels are the 0/1 columns shifted up one issue; rows with a
        complete label set become self.train_df and self.predict_df stays empty.
        Predict mode: label columns are all-NaN placeholders and the whole frame
        becomes self.predict_df. Returns True on success, False on failure.
        """
        try:
            if self.final_feat_df is None:
                logger.error("最终特征矩阵为空, 无法构造标签列")
                return False
            if self.df_01 is None:
                logger.error("df_01数据为空, 无法构造标签列")
                return False
            
            # Fill feature gaps forward then backward (idx/date excluded).
            feat_cols = [col for col in self.final_feat_df.columns if col not in ["idx", "date"]]
            self.final_feat_df[feat_cols] = self.final_feat_df[feat_cols].ffill().bfill()
            logger.info("特征列缺失值填充完成")
            
            # Restrict df_01 to the issues present in the feature matrix.
            df_01_aligned = self.df_01[self.df_01["idx"].isin(self.final_feat_df["idx"])].copy()
            df_01_aligned = df_01_aligned.sort_values("idx").reset_index(drop=True)
            
            # Train mode: construct next-issue labels by shifting up one row.
            if self.run_type == "train":
                # Row i of next_labels holds draw i+1's 0/1 outcome; the last row is NaN.
                next_labels = df_01_aligned[self.red_cols_01].shift(-1).copy()
                next_labels.columns = [f"{col}_next" for col in next_labels.columns]
                
                # Concatenation aligns by index, not by idx.
                # NOTE(review): if the two frames' lengths differ, rows can be mispaired;
                # confirm upstream keeps both frames on identical RangeIndexes.
                if len(df_01_aligned) != len(self.final_feat_df):
                    logger.warning(f"df_01与final_feat_df的期数不匹配({len(df_01_aligned)} vs {len(self.final_feat_df)}), 强制按index对齐")
                
                self.final_feat_df = pd.concat([self.final_feat_df, next_labels], axis=1)
                
                # Keep rows whose full label set is present as training data.
                self.train_df = self.final_feat_df.dropna(subset=[f"{col}_next" for col in self.red_cols_01]).reset_index(drop=True)
                self.predict_df = pd.DataFrame()  # no prediction rows in train mode
                logger.info(f"训练模式 - 标签列构造完成: 训练数据{len(self.train_df)}期")
            
            # Predict mode: NaN placeholders -- labels come from the model later.
            else:
                next_labels = pd.DataFrame(
                    np.nan,
                    index=self.final_feat_df.index,
                    columns=[f"{col}_next" for col in self.red_cols_01]
                )
                self.final_feat_df = pd.concat([self.final_feat_df, next_labels], axis=1)
                self.predict_df = self.final_feat_df.copy()
                self.train_df = pd.DataFrame()  # no training rows in predict mode
                logger.info(f"预测模式 - 标签列构造完成: 待预测数据{len(self.predict_df)}期（标签列设为NaN）")

            return True
        except Exception as e:
            logger.error(f"构造标签列异常: {str(e)}{traceback.format_exc()}")
            return False
        
    def _align_feat_with_label(self, feat_df):
        """Force-align a feature frame with the label idx range (train mode only).

        In predict mode, or when no labels exist, the frame is returned untouched.
        Fix: the original had two consecutive docstrings -- the second was a dead
        bare-string statement and has been removed.

        Args:
            feat_df: feature DataFrame with an "idx" column.

        Returns:
            The aligned (possibly deduplicated and filtered) DataFrame.
        """
        if self.run_type == "predict" or self.label_df is None or self.label_df.empty:
            logger.info("预测模式/标签为空，跳过特征-标签对齐")
            return feat_df
        
        # Drop duplicated issue numbers, keeping the first occurrence.
        if feat_df["idx"].duplicated().any():
            duplicate_idx = feat_df[feat_df["idx"].duplicated(keep=False)]["idx"].unique()
            logger.error(f"特征数据中存在{len(duplicate_idx)}个重复idx值: {duplicate_idx.tolist()}")
            feat_df = feat_df.drop_duplicates(subset=["idx"], keep="first").reset_index(drop=True)
        
        # Keep only issues that also have a label row.
        label_idx_set = set(self.label_df["idx"])
        feat_df = feat_df[feat_df["idx"].isin(label_idx_set)].reset_index(drop=True)
        
        # Clip to the minimum valid issue established by the fractal features.
        if self.min_valid_issue is not None:
            feat_df = feat_df[feat_df["idx"] >= self.min_valid_issue].reset_index(drop=True)
        
        logger.info(f"训练模式 - 特征对齐完成, 有效期数: {len(feat_df)}期(最小期号: {feat_df['idx'].min() if not feat_df.empty else '无'})")
        return feat_df

    def _build_label(self):
        """Build the training label frame (train mode only).

        Pairs each issue with the NEXT issue's r1 flag and stores the result in
        self.label_df; labels whose idx exceeds the feature range are dropped.
        No-op (label_df = None) outside train mode or before data is loaded.
        """
        if self.run_type != "train" or self.raw_df is None or self.df_01 is None:
            logger.info("非训练模式/未加载基础数据, 不构建标签")
            self.label_df = None
            return

        # idx comes from rows [0, n-2]; the label from rows [1, n-1] of the 0/1 matrix.
        upper_bound = self.df_01["idx"].max()
        labels = pd.DataFrame({
            "idx": self.raw_df["idx"].iloc[:-1],
            "label": self.df_01["r1"].iloc[1:].values
        })

        # Discard labels whose idx falls outside the feature data range.
        n_before = len(labels)
        labels = labels[labels["idx"] <= upper_bound]
        dropped = n_before - len(labels)
        if dropped > 0:
            logger.warning(f"标签idx中有{dropped}条超出特征数据范围, 已过滤")

        labels = labels.drop_duplicates(subset=["idx"], keep="first")
        self.label_df = labels
        logger.info(f"训练模式 - 标签数据构建完成, 共{len(labels)}条, idx范围: [{labels['idx'].min()}, {labels['idx'].max()}]")

    def load_aligned_fractal_features(self):
        """Load the multi-window (13/34/55) fractal features and align them.

        Train mode: filters to labelled issues and anchors min_valid_issue on the
        fractal data, then clips labels to the same range. Predict mode: keeps
        only the to-be-predicted issue's row. Returns True/False.
        """
        try:
            logger.info(f'加载对齐后的多窗口分形特征(13/34/55窗口) - 模式：{self.run_type}')
            if not os.path.exists(self.fractal_path):
                raise FileNotFoundError(f"分形特征文件不存在: {self.fractal_path}")
            
            # Load fractal features and dedup by issue number.
            self.fractal_df = pd.read_csv(self.fractal_path, encoding="utf-8")
            self.fractal_df["idx"] = self.fractal_df["idx"].astype(int)
            self.fractal_df = self.fractal_df.drop_duplicates(subset=["idx"], keep="first").reset_index(drop=True)

            # Auto-detect the fractal feature columns, suffixed by window size.
            fractal_cols_pattern = r'^(fractal_dim_|fractal_r2_|avg_box_count_|scale_min_|scale_max_|scale_span_)(13|34|55)'
            self.fractal_feat_cols = [col for col in self.fractal_df.columns if re.match(fractal_cols_pattern, col)]
            logger.info(f"识别到分形特征列(按窗口): {self.fractal_feat_cols}")

            # Coerce any string-typed fractal columns to numeric (NaN on failure).
            for col in self.fractal_feat_cols:
                if self.fractal_df[col].dtype == 'object':
                    logger.warning(f"分形特征列{col}仍为object类型, 尝试强制转换为float")
                    self.fractal_df[col] = pd.to_numeric(self.fractal_df[col], errors='coerce')

            # Predict mode: keep only the to-be-predicted issue.
            if self.run_type == "predict":
                self.fractal_df = self.fractal_df[self.fractal_df["idx"] == self.predict_issue].copy()
                if self.fractal_df.empty:
                    raise ValueError(f"分形特征文件中未找到待预测期{self.predict_issue}的特征")
                logger.info(f"预测模式 - 筛选后分形特征: {len(self.fractal_df)}期（仅待预测期）")
            else:
                # Train mode: keep only issues that have labels.
                label_idx_set = set(self.label_df["idx"]) if self.label_df is not None else set()
                if label_idx_set:
                    before_filter_count = len(self.fractal_df)
                    self.fractal_df = self.fractal_df[self.fractal_df["idx"].isin(label_idx_set)].reset_index(drop=True)
                    after_filter_count = len(self.fractal_df)
                    logger.info(f"训练模式 - 分形特征过滤前{before_filter_count}期, 过滤后{after_filter_count}期")

            # The minimum valid issue anchors every other feature family.
            if self.run_type == "train":
                self.min_valid_issue = self.fractal_df["idx"].min()
                logger.info(f"训练模式 - 以分形特征为基准, 确定所有特征的最小有效期号: {self.min_valid_issue}")

                # Clip the labels to the same range.
                self.label_df = self.label_df[self.label_df["idx"] >= self.min_valid_issue].reset_index(drop=True)
                logger.info(f"训练模式 - 标签数据调整后: 共{len(self.label_df)}条, idx范围: [{self.label_df['idx'].min()}, {self.label_df['idx'].max()}]")
            else:
                self.min_valid_issue = self.predict_issue  # predict mode: anchor on the predicted issue

            logger.info(f"分形特征加载完成, 共{len(self.fractal_df)}期")
            return True
        except Exception as e:
            logger.error(f"加载分形特征失败: {str(e)}{traceback.format_exc()}")
            return False

    # ---------------------- Parallel core: per-window feature computation (static, multiprocessing-safe) ----------------------
    @staticmethod
    def _calc_window_features(window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, save_dir, run_type, predict_issue):
        """Compute and persist every feature family for one window (worker process).

        Fix: the original held an explanatory triple-quoted string inside the merge
        loop -- a dead expression statement evaluated every iteration; it is now a
        real comment.

        Returns:
            The merged per-window feature DataFrame, or None on any failure.
        """
        # Each worker process initializes its own log file.
        init_global_logger(log_file=f"window_{window}_feature_{run_type}.log", pid=os.getpid())
        logger.info(f"===== 开始计算窗口{window}的所有特征(进程PID: {os.getpid()}, 模式: {run_type}) =====")

        try:
            # 1. Basic statistical features
            basic_feat = SSQ30PlusFeatureEngineer._calculate_basic_stat_features(
                window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue
            )
            if basic_feat is None or basic_feat.empty:
                logger.error(f"窗口{window}基础统计特征计算失败")
                return None

            # 2. Temporal dynamic features
            temporal_feat = SSQ30PlusFeatureEngineer._calculate_temporal_dynamic_features(
                window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue
            )
            if temporal_feat is None or temporal_feat.empty:
                logger.error(f"窗口{window}时序动态特征计算失败")
                return None

            # 3. Correlation features
            correlation_feat = SSQ30PlusFeatureEngineer._calculate_correlation_features(
                window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue
            )
            if correlation_feat is None or correlation_feat.empty:
                logger.error(f"窗口{window}关联关系特征计算失败")
                return None

            # 4. Advanced derived features
            advanced_feat = SSQ30PlusFeatureEngineer._calculate_advanced_derived_features(
                window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue
            )
            if advanced_feat is None or advanced_feat.empty:
                logger.error(f"窗口{window}高级衍生特征计算失败")
                return None

            # Merge all feature families for this window, deduping idx and column names.
            window_merged = basic_feat.drop_duplicates(subset=["idx"], keep="first")
            window_merged = window_merged.loc[:, ~window_merged.columns.duplicated(keep='first')]

            # Merge the remaining families one by one (dropping their duplicate date column).
            for feat_name, feat_df in [
                ("时序动态特征", temporal_feat),
                ("关联关系特征", correlation_feat),
                ("高级衍生特征", advanced_feat)
            ]:
                feat_df = feat_df.copy().drop(columns=["date"], errors="ignore")
                # Merge first, THEN drop duplicated columns on the merged result so the
                # boolean mask length matches the merged frame's columns:
                #   columns.duplicated(keep='first') flags repeated names (True = duplicate),
                #   ~mask keeps only the first occurrence of each column name.
                merged_temp = pd.merge(window_merged, feat_df, on="idx", how="inner")
                window_merged = merged_temp.loc[:, ~merged_temp.columns.duplicated(keep='first')]

            # Persist this window's features to CSV.
            save_path = os.path.join(save_dir, f"30plus_features_{window}_{run_type}.csv")
            window_merged.to_csv(save_path, index=False, encoding="utf-8")
            logger.info(f"窗口{window}特征保存完成: {save_path}")
            logger.info(f"窗口{window}特征详情: {len(window_merged)}期, {len(window_merged.columns)}个特征")

            return window_merged
        except Exception as e:
            logger.error(f"窗口{window}特征计算失败: {str(e)}{traceback.format_exc()}")
            return None

    # ---------------------- (1) Basic statistical features (parallel-safe) ----------------------
    @staticmethod
    def _calculate_basic_stat_features(window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue):
        """Compute per-window basic statistical features (frequency, warm/cold, miss, zones).

        Fixes vs the original:
        1. calc_warm_ratio's predict branch set past_data to the full history and was
           then unconditionally overwritten by a copy of the train-mode tail() logic,
           defeating the documented "use all history" behavior; the duplicate lines
           are removed.
        2. miss_df was built with index=raw_df.index while holding len(feat_df) rows;
           in predict mode (1 row vs N-row index) that raises a length-mismatch
           ValueError. It now uses feat_df.index (identical in train mode, where
           feat_df is built from raw_df and shares its RangeIndex).

        Returns the feature DataFrame (train: one row per draw, label-aligned;
        predict: a single row for predict_issue).
        """
        logger.info(f"开始计算窗口{window}的基础统计特征(模式: {run_type})...")
        # Predict mode emits a single row for the to-be-predicted issue only.
        if run_type == "predict":
            feat_df = pd.DataFrame({"idx": [predict_issue], "date": [pd.Timestamp.now().strftime("%Y-%m-%d")]})
        else:
            feat_df = pd.DataFrame({"idx": raw_df["idx"], "date": raw_df["date"]})
        red_balls = GLOBAL_CONFIG["red_balls"]
        zone_map = GLOBAL_CONFIG["zone_map"]

        # Entropy of the three zone ratios (base 3 so the maximum is 1.0).
        def calc_zone_entropy(ratios):
            ratios = [r + 1e-8 for r in ratios]  # avoid log(0)
            return entropy(ratios, base=3)

        # 1. Frequency features
        freq_data = {}
        for ball in red_balls:
            col = f"r{ball}"
            if run_type == "predict":
                # Predict mode: frequency over the whole history.
                # NOTE(review): this is a RATIO while train mode stores a rolling COUNT;
                # the scales differ -- confirm downstream consumers expect this.
                total_periods = len(df_01)
                freq = df_01[col].sum() / total_periods if total_periods > 0 else 0
                freq_data[f"red_{ball}_freq_{window}"] = [freq]
            else:
                # Train mode: rolling-window occurrence count.
                freq_data[f"red_{ball}_freq_{window}"] = df_01[col].rolling(window=window, min_periods=1).sum()
                
        freq_df = pd.DataFrame(freq_data, index=feat_df.index)
        feat_df = pd.concat([feat_df, freq_df], axis=1)
        feat_df[f"avg_freq_{window}"] = feat_df[[f"red_{ball}_freq_{window}" for ball in red_balls]].mean(axis=1)

        # 2. Warm-number ratio: share of balls drawn 1-2 times in the lookback data.
        def calc_warm_ratio(row):
            current_idx = row["idx"]
            if run_type == "predict":
                # Predict mode: use the entire history (bugfix: previously
                # clobbered below by a duplicated train-mode tail() slice).
                past_data = df_01.copy()
            else:
                if current_idx < min_valid_issue:
                    return np.nan
                past_data = df_01[df_01["idx"] < current_idx].tail(window)
            if len(past_data) < 5:
                return 0.4  # not enough history: neutral prior
            ball_counts = past_data[red_cols_01].sum()
            warm_count = ((ball_counts >= 1) & (ball_counts <= 2)).sum()
            return warm_count / len(red_cols_01) if len(red_cols_01) > 0 else 0.0
        feat_df[f"warm_ratio_{window}"] = feat_df.apply(calc_warm_ratio, axis=1)

        # 3. Miss (gap) features: periods since each ball last appeared, capped at `window`.
        miss_data = {}
        for ball in red_balls:
            col = f"r{ball}"
            if run_type == "predict":
                # Predict mode: gap from the last historical occurrence to now.
                # NOTE(review): relies on df_01 having a 0..n-1 RangeIndex (reset upstream).
                last_occur_idx = df_01[df_01[col] == 1].index[-1] if df_01[col].sum() > 0 else len(df_01)
                miss = len(df_01) - last_occur_idx
                miss_data[f"red_{ball}_current_miss_{window}"] = [min(miss, window)]
            else:
                # Train mode: running gap per draw.
                current_miss = []
                last_occur_idx = -1
                for i, val in enumerate(df_01[col]):
                    if val == 1:
                        current_miss.append(0)
                        last_occur_idx = i
                    else:
                        miss = i - last_occur_idx if last_occur_idx != -1 else i + 1
                        current_miss.append(min(miss, window))
                miss_data[f"red_{ball}_current_miss_{window}"] = current_miss
        # Bugfix: the index must follow feat_df (1 row in predict mode), not raw_df,
        # otherwise predict mode raises a length-mismatch error.
        miss_df = pd.DataFrame(miss_data, index=feat_df.index)
        feat_df = pd.concat([feat_df, miss_df], axis=1)

        # 4. Cold-number features (a ball is "cold" once its gap reaches the window size).
        cold_threshold = window

        def calc_cold_ratio(row):
            idx = row["idx"]
            current_red = []
            if run_type == "predict":
                # Predict mode: candidate cold set = any ball at/over the threshold.
                current_red = [ball for ball in red_balls if row[f"red_{ball}_current_miss_{window}"] >= cold_threshold]
            else:
                if idx < min_valid_issue:
                    return np.nan
                current_red = raw_df[raw_df["idx"] == idx]["red_list"].iloc[0]
            
            # Guard: empty candidate set means no cold balls (avoids division by zero).
            if not current_red:
                return 0.0
            
            cold_count = 0
            for ball in current_red:
                # Read the gap directly from the row Series.
                miss_val = row[f"red_{ball}_current_miss_{window}"]
                if miss_val >= cold_threshold:
                    cold_count += 1
            
            # At most 6 red balls per draw, so cap the denominator at 6.
            denominator = min(len(current_red), 6)
            return cold_count / denominator if denominator != 0 else 0.0

        def check_cold_back(row):
            idx = row["idx"]
            current_red = []
            if run_type == "predict":
                current_red = [ball for ball in red_balls if row[f"red_{ball}_current_miss_{window}"] >= cold_threshold]
            else:
                if idx < min_valid_issue:
                    return np.nan
                current_red = raw_df[raw_df["idx"] == idx]["red_list"].iloc[0]
            
            # Empty set: no cold ball came back.
            if not current_red:
                return 0
            
            for ball in current_red:
                miss_val = row[f"red_{ball}_current_miss_{window}"]
                if miss_val >= cold_threshold:
                    return 1
            return 0

        cold_data = {
            f"cold_back_flag_{window}": feat_df.apply(check_cold_back, axis=1),
            f"cold_ratio_{window}": feat_df.apply(calc_cold_ratio, axis=1)
        }
        cold_df = pd.DataFrame(cold_data, index=feat_df.index)
        feat_df = pd.concat([feat_df, cold_df], axis=1)

        # 5. Zone distribution features
        def calc_zone_ratios(red_list):
            zone1 = sum(1 for b in red_list if b in zone_map[1])
            zone2 = sum(1 for b in red_list if b in zone_map[2])
            zone3 = sum(1 for b in red_list if b in zone_map[3])
            return zone1/6, zone2/6, zone3/6

        if run_type == "predict":
            # Predict mode: use the historical AVERAGE zone distribution.
            # Split the per-draw ratio tuples into three Series before averaging
            # (a mean over tuples would be meaningless).
            historical_zone_tuples = raw_df["red_list"].apply(calc_zone_ratios)
            historical_zone1 = historical_zone_tuples.apply(lambda x: x[0])
            historical_zone2 = historical_zone_tuples.apply(lambda x: x[1])
            historical_zone3 = historical_zone_tuples.apply(lambda x: x[2])
            avg_zone1 = historical_zone1.mean()
            avg_zone2 = historical_zone2.mean()
            avg_zone3 = historical_zone3.mean()
            # One-element arrays to match the single predicted row in feat_df.
            zone1_ratios = np.array([avg_zone1])
            zone2_ratios = np.array([avg_zone2])
            zone3_ratios = np.array([avg_zone3])
        else:
            zone_ratios = raw_df["red_list"].apply(calc_zone_ratios)
            zone1_ratios = np.array([x[0] for x in zone_ratios])
            zone2_ratios = np.array([x[1] for x in zone_ratios])
            zone3_ratios = np.array([x[2] for x in zone_ratios])

        # Rolling entropy of an array over the trailing `window` values.
        def rolling_entropy(arr, window):
            entropies = []
            for i in range(len(arr)):
                start = max(0, i - window + 1)
                window_vals = arr[start:i+1]
                p = np.histogram(window_vals, bins=5, density=True)[0] + 1e-8
                entropies.append(entropy(p))
            return np.array(entropies)

        if run_type == "predict":
            # Predict mode has a single row: entropy of the averaged zone ratios.
            zone_entropy = calc_zone_entropy((zone1_ratios[0], zone2_ratios[0], zone3_ratios[0]))
            zone_entropy_std = np.array([zone_entropy])
        else:
            # Train mode: rolling entropy over per-draw zone entropies.
            zone_entropy_list = np.array([calc_zone_entropy((z1, z2, z3)) for z1, z2, z3 in zip(zone1_ratios, zone2_ratios, zone3_ratios)])
            zone_entropy_std = rolling_entropy(zone_entropy_list, window)

        zone_data = {
            f"zone1_ratio_{window}": zone1_ratios,
            f"zone2_ratio_{window}": zone2_ratios,
            f"zone3_ratio_{window}": zone3_ratios,
            f"zone_entropy_std_{window}": zone_entropy_std
        }

        # Edge case: if feat_df somehow has several rows in predict mode,
        # broadcast the single predicted value to every row.
        if run_type == "predict" and len(feat_df) > 1:
            zone_data = {
                key: np.full(len(feat_df), val[0]) for key, val in zone_data.items()
            }

        zone_df = pd.DataFrame(zone_data, index=feat_df.index)
        feat_df = pd.concat([feat_df, zone_df], axis=1)

        # Align with labels (train mode only).
        if run_type == "train":
            feat_df = SSQ30PlusFeatureEngineer._align_feat_with_label_parallel(feat_df, label_df, min_valid_issue)
        
        logger.info(f"窗口{window}基础统计特征完成: {len(feat_df.columns)-2}个特征")
        return feat_df

    # ---------------------- (2)时序动态特征(并行兼容版) ----------------------
    @staticmethod
    def _calculate_temporal_dynamic_features(window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue):
        """按指定窗口计算时序动态特征"""
        logger.info(f"计算窗口{window}的时序动态特征(模式: {run_type})...")
        # 预测模式：仅生成待预测期的特征
        if run_type == "predict":
            feat_df = pd.DataFrame({"idx": [predict_issue], "date": [pd.Timestamp.now().strftime("%Y-%m-%d")]})
            valid_raw_df = raw_df.copy()  # 预测模式用所有历史数据
        else:
            feat_df = pd.DataFrame({"idx": raw_df["idx"], "date": raw_df["date"]})
            valid_idx = set(feat_df["idx"].values)
            valid_raw_df = raw_df[raw_df["idx"].isin(valid_idx)].sort_values("idx").reset_index(drop=True)
        
        valid_length = len(valid_raw_df)

        # 衍生基础列
        valid_raw_df["red_sum"] = valid_raw_df[["r1", "r2", "r3", "r4", "r5", "r6"]].sum(axis=1)
        valid_raw_df["blue_odd"] = (valid_raw_df["b1"] % 2).astype(int)
        valid_raw_df["red_odd_cnt"] = valid_raw_df[["r1", "r2", "r3", "r4", "r5", "r6"]].apply(
            lambda row: sum(1 for x in row if x % 2 == 1), axis=1
        )

        # 1. 移动窗口统计特征
        valid_red_sum = valid_raw_df["red_sum"].values
        if run_type == "predict":
            # 预测模式：历史窗口内的均值
            red_sum_mean = np.mean(valid_red_sum[-window:]) if len(valid_red_sum) >= window else np.mean(valid_red_sum)
            feat_df[f"red_sum_{window}mean"] = [red_sum_mean]
        else:
            feat_df[f"red_sum_{window}mean"] = np.convolve(valid_red_sum, np.ones(window)/window, mode='same')

        valid_blue_odd = valid_raw_df["blue_odd"].values
        if run_type == "predict":
            blue_odd_ratio = np.mean(valid_blue_odd[-window:]) if len(valid_blue_odd) >= window else np.mean(valid_blue_odd)
            feat_df[f"blue_odd_{window}ratio"] = [blue_odd_ratio]
        else:
            blue_odd_win = np.convolve(valid_blue_odd, np.ones(window), mode='same')
            feat_df[f"blue_odd_{window}ratio"] = blue_odd_win / window

        red_columns = ["r1", "r2", "r3", "r4", "r5", "r6"]
        valid_red_flat = valid_raw_df[red_columns].values.flatten()
        if run_type == "predict":
            # 预测模式：历史窗口内的重复数
            window_vals = valid_red_flat[-window*6:] if len(valid_red_flat) >= window*6 else valid_red_flat
            red_repeat_cnt = len(window_vals) - len(set(window_vals))
            feat_df[f"red_repeat_{window}cnt"] = [red_repeat_cnt]
        else:
            red_repeat_win = []
            for i in range(valid_length):
                start_idx = max(0, (i - window + 1) * 6)
                end_idx = (i + 1) * 6
                window_vals = valid_red_flat[start_idx:end_idx]
                red_repeat_win.append(len(window_vals) - len(set(window_vals)))
            feat_df[f"red_repeat_{window}cnt"] = red_repeat_win

        valid_blue = valid_raw_df["b1"].values
        if run_type == "predict":
            blue_std = np.std(valid_blue[-window:]) if len(valid_blue) >= window else np.std(valid_blue)
            feat_df[f"blue_std_{window}win"] = [blue_std]
        else:
            blue_std_win = [
                np.std(valid_blue[max(0, i - window + 1):i+1]) 
                for i in range(valid_length)
            ]
            feat_df[f"blue_std_{window}win"] = blue_std_win

        # 2. 状态转移矩阵特征
        def has_consecutive(row):
            red_list = [row["r1"], row["r2"], row["r3"], row["r4"], row["r5"], row["r6"]]
            sorted_red = sorted(red_list)
            return 1 if any(sorted_red[i+1] - sorted_red[i] == 1 for i in range(5)) else 0
        
        valid_raw_df[f"has_consecutive_{window}"] = valid_raw_df.apply(has_consecutive, axis=1)
        consecutive_flags = valid_raw_df[f"has_consecutive_{window}"].values

        if run_type == "predict":
            # 预测模式：历史窗口内的转移概率
            if len(consecutive_flags) < window + 1:
                consecutive_prob = 0.4
            else:
                window_flags = consecutive_flags[-window-1:]  # 包含最后一期
                prev_consecutive = window_flags[:-1]
                curr_consecutive = window_flags[1:]
                transfer_count = sum(prev_consecutive & curr_consecutive)
                total_count = sum(prev_consecutive)
                consecutive_prob = transfer_count / total_count if total_count >= 5 else 0.4
            feat_df[f"consecutive_transfer_prob_{window}"] = [consecutive_prob]
        else:
            if len(consecutive_flags) < 2:
                feat_df[f"consecutive_transfer_prob_{window}"] = [0.4] * valid_length
            else:
                prev_consecutive = consecutive_flags[:-1]
                curr_consecutive = consecutive_flags[1:]
                transfer_counts = np.zeros(valid_length, dtype=int)
                total_counts = np.zeros(valid_length, dtype=int)
                
                for i in range(window, valid_length):
                    start = max(0, i - window)
                    transfer_counts[i] = sum(prev_consecutive[start:i] & curr_consecutive[start:i])
                    total_counts[i] = sum(prev_consecutive[start:i])
                
                consecutive_probs = np.divide(
                    transfer_counts, 
                    total_counts, 
                    out=np.full_like(transfer_counts, 0.4, dtype=np.float64),
                    where=total_counts >= 5
                )
                feat_df[f"consecutive_transfer_prob_{window}"] = consecutive_probs

        # 对齐标签（仅训练模式）
        if run_type == "train":
            feat_df = SSQ30PlusFeatureEngineer._align_feat_with_label_parallel(feat_df, label_df, min_valid_issue)
        
        logger.info(f"窗口{window}时序动态特征完成: {len(feat_df.columns)-2}个特征")
        return feat_df

    # ---------------------- (3)关联关系特征(并行兼容版) ----------------------
    @staticmethod
    def _calculate_correlation_features(window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue):
        """Compute pairwise-correlation features for one rolling window.

        Derives three feature families from the most recent `window` draws:
        strongly co-occurring red-ball pairs, repulsion (anti-correlated)
        pairs, and a cross-zone conditional probability.

        Args:
            window: rolling window length in draws.
            raw_df: raw draw history; must contain "idx", "date" and a
                "red_list" column holding each draw's red balls.
            df_01: one-hot encoded history (unused here; kept so all feature
                workers share one signature).
            red_cols_01: one-hot red-ball column names (unused here).
            label_df: label frame used for alignment in train mode.
            min_valid_issue: earliest issue number considered valid.
            run_type: "train" or "predict".
            predict_issue: issue number of the draw to predict (predict mode).

        Returns:
            DataFrame with "idx", "date" and the window-suffixed features.
        """
        logger.info(f"开始计算窗口{window}的关联关系特征(模式: {run_type})...")
        # Predict mode: build a single row for the to-be-predicted issue only
        if run_type == "predict":
            feat_df = pd.DataFrame({"idx": [predict_issue], "date": [pd.Timestamp.now().strftime("%Y-%m-%d")]})
            recent_data = raw_df.tail(window) if len(raw_df) > window else raw_df  # history restricted to the latest window
        else:
            feat_df = pd.DataFrame({"idx": raw_df["idx"], "date": raw_df["date"]})
            valid_raw_df = raw_df[raw_df["idx"] >= min_valid_issue].reset_index(drop=True)
            recent_data = valid_raw_df.tail(window) if len(valid_raw_df) > window else valid_raw_df
        
        total_periods = len(recent_data)
        top_k = 5  # number of strongest pairs kept per feature family

        # 1. Strongly co-occurring red-ball pairs within the window
        co_occur_counts = defaultdict(int)
        for red_list in recent_data["red_list"]:
            for i in range(len(red_list)):
                for j in range(i+1, len(red_list)):
                    pair = tuple(sorted((red_list[i], red_list[j])))
                    co_occur_counts[pair] += 1

        top_co_occur = sorted(
            co_occur_counts.items(),
            key=lambda x: x[1]/total_periods,
            reverse=True
        )[:top_k]
        for i, ((a, b), cnt) in enumerate(top_co_occur):
            prob = cnt / total_periods
            feat_df[f"co_occur_{a}_{b}_prob_{window}"] = [prob] * len(feat_df)

            # a=a / b=b defaults freeze the loop variables for this closure
            def check_co_occur(row, a=a, b=b):
                idx = row["idx"]
                if run_type == "predict":
                    # Predict mode: placeholder flag derived from the historical co-occurrence probability
                    return 1 if prob > 0.3 else 0
                if idx < min_valid_issue:
                    return np.nan
                current_red = raw_df[raw_df["idx"] == idx]["red_list"].iloc[0]
                return 1 if (a in current_red and b in current_red) else 0
            
            feat_df[f"co_occur_{a}_{b}_flag_{window}"] = feat_df.apply(check_co_occur, axis=1)

        # 2. Repulsion pairs: observed together far less often than independence predicts
        ball_probs = {}
        for ball in GLOBAL_CONFIG["red_balls"]:
            ball_probs[ball] = recent_data["red_list"].apply(lambda x: 1 if ball in x else 0).mean()
        
        repel_pairs = []
        for (a, b), cnt in co_occur_counts.items():
            theoretical = ball_probs[a] * ball_probs[b] * total_periods  # expected joint count under independence
            if cnt < theoretical * 0.6:
                repel_pairs.append(((a, b), cnt / total_periods))
        top_repel = sorted(repel_pairs, key=lambda x: x[1])[:top_k]
        for i, ((a, b), prob) in enumerate(top_repel):
            feat_df[f"repel_{a}_{b}_prob_{window}"] = [prob] * len(feat_df)

            def check_repel(row, a=a, b=b):
                idx = row["idx"]
                if run_type == "predict":
                    # Predict mode: placeholder flag derived from the historical repulsion probability
                    return 0 if prob < 0.1 else 1
                if idx < min_valid_issue:
                    return np.nan
                current_red = raw_df[raw_df["idx"] == idx]["red_list"].iloc[0]
                return 1 if (a in current_red and b in current_red) else 0
            
            feat_df[f"repel_{a}_{b}_flag_{window}"] = feat_df.apply(check_repel, axis=1)

        # 3. Cross-zone conditional probability:
        #    P(zone 3 holds exactly 2 balls | zone 1 holds exactly 3 balls)
        zone1_3_cnt = 0
        zone1_3_zone3_2_cnt = 0
        zone_map = GLOBAL_CONFIG["zone_map"]
        for red_list in recent_data["red_list"]:
            z1 = sum(1 for b in red_list if b in zone_map[1])
            z3 = sum(1 for b in red_list if b in zone_map[3])
            if z1 == 3:
                zone1_3_cnt += 1
                if z3 == 2:
                    zone1_3_zone3_2_cnt += 1
        cross_prob = zone1_3_zone3_2_cnt / zone1_3_cnt if zone1_3_cnt > 0 else 0
        feat_df[f"cross_zone1_3_to_zone3_2_prob_{window}"] = [cross_prob] * len(feat_df)

        # Align features with labels (train mode only)
        if run_type == "train":
            feat_df = SSQ30PlusFeatureEngineer._align_feat_with_label_parallel(feat_df, label_df, min_valid_issue)
        
        logger.info(f"窗口{window}关联关系特征完成: {len(feat_df.columns)-2}个特征")
        return feat_df

    # ---------------------- (4)高级衍生特征(并行兼容版) ----------------------
    @staticmethod
    def _calculate_advanced_derived_features(window, raw_df, df_01, red_cols_01, label_df, min_valid_issue, run_type, predict_issue):
        """Compute advanced derived features for one rolling window.

        Three features per window: red-ball distribution entropy, blue-ball
        volatility coefficient (std/mean), and a hot/cold-ball resonance index.

        Args:
            window: rolling window length in draws.
            raw_df: raw draw history with "idx", "b1" and "red_list" columns.
            df_01: one-hot encoded history with "idx" and per-ball r-columns.
            red_cols_01: names of the one-hot red-ball columns in df_01.
            label_df: label frame used for alignment in train mode.
            min_valid_issue: earliest issue number considered valid.
            run_type: "train" or "predict".
            predict_issue: issue number of the draw to predict (predict mode).

        Returns:
            DataFrame with "idx", "date" and three window-suffixed features.
        """
        logger.info(f"计算窗口{window}的高级衍生特征(模式: {run_type})...")
        # Predict mode: build a single row for the to-be-predicted issue only
        if run_type == "predict":
            feat_df = pd.DataFrame({"idx": [predict_issue], "date": [pd.Timestamp.now().strftime("%Y-%m-%d")]})
        else:
            feat_df = pd.DataFrame({"idx": raw_df["idx"], "date": raw_df["date"]})

        # 1. Red-ball distribution entropy over the trailing window
        def calc_red_dist_entropy(row):
            idx = row["idx"]
            if run_type == "predict":
                # Predict mode: entropy of the most recent historical window
                past_data = df_01.tail(window) if len(df_01) > window else df_01
            else:
                past_data = df_01[(df_01["idx"] < idx) & (df_01["idx"] >= min_valid_issue)].tail(window)
            
            if len(past_data) < 10:
                return 1.5  # neutral fallback when history is too short
            freq_dist = past_data[red_cols_01].sum() / past_data[red_cols_01].sum().sum()
            # base = number of red balls normalises the entropy scale
            return entropy(freq_dist.values, base=len(GLOBAL_CONFIG["red_balls"]))
        
        feat_df[f"red_dist_entropy_{window}"] = feat_df.apply(calc_red_dist_entropy, axis=1)

        # 2. Blue-ball volatility coefficient (std / mean over the window)
        if run_type == "predict":
            # Predict mode: volatility over the latest historical window
            valid_raw_df = raw_df.tail(window) if len(raw_df) > window else raw_df
            blue_series = valid_raw_df["b1"].values
            mean = np.mean(blue_series)
            std = np.std(blue_series)
            blue_volatility = std / (mean + 1e-8) if mean != 0 else 0
            feat_df[f"blue_volatility_coeff_{window}"] = [blue_volatility]
        else:
            valid_raw_df = raw_df[raw_df["idx"] >= min_valid_issue].reset_index(drop=True)
            blue_series = valid_raw_df["b1"].values
            blue_volatility = []
            for i in range(len(blue_series)):
                start = max(0, i - window + 1)
                window_blue = blue_series[start:i+1]
                mean = np.mean(window_blue)
                std = np.std(window_blue)
                blue_volatility.append(std / (mean + 1e-8) if mean != 0 else 0)
            blue_volatility_map = dict(zip(valid_raw_df["idx"], blue_volatility))
            feat_df[f"blue_volatility_coeff_{window}"] = feat_df["idx"].map(blue_volatility_map)

        # 3. Hot/cold-ball resonance index
        hot_threshold = 0.3
        # Predict mode lowers the cold threshold to 70% of the window to avoid empty candidate lists
        cold_threshold = window * 0.7 if run_type == "predict" else window
        def calc_hot_cold_resonance(row):
            idx = row["idx"]
            if run_type == "predict":
                # Predict mode: hot/cold distribution over the latest historical window
                past_window = df_01.tail(window) if len(df_01) > window else df_01
                # Proxy the (unknown) upcoming reds with balls appearing in >20% of the window
                current_red = [ball for ball in GLOBAL_CONFIG["red_balls"] if past_window[f"r{ball}"].sum()/len(past_window) > 0.2]
            else:
                if idx < min_valid_issue:
                    return np.nan
                current_red = raw_df[raw_df["idx"] == idx]["red_list"].iloc[0]
                past_window = df_01[(df_01["idx"] < idx) & (df_01["idx"] >= min_valid_issue)].tail(window)
            
            if len(past_window) < 5:
                return 0
            hot_count = 0
            cold_count = 0
            for ball in current_red:
                hot_freq = past_window[f"r{ball}"].sum() / len(past_window)
                if hot_freq > hot_threshold:
                    hot_count += 1
                # Misses since the ball last appeared (full window if never seen)
                cold_miss = past_window.index[-1] - past_window[past_window[f"r{ball}"] == 1].index[-1] if sum(past_window[f"r{ball}"]) > 0 else window
                if cold_miss >= cold_threshold:
                    cold_count += 1
            return hot_count * cold_count
        
        feat_df[f"hot_cold_resonance_index_{window}"] = feat_df.apply(calc_hot_cold_resonance, axis=1)

        # Align features with labels (train mode only)
        if run_type == "train":
            feat_df = SSQ30PlusFeatureEngineer._align_feat_with_label_parallel(feat_df, label_df, min_valid_issue)
        
        logger.info(f"窗口{window}高级衍生特征完成: {len(feat_df.columns)-2}个特征")
        return feat_df

    # ---------------------- 并行兼容版: 特征对齐 ----------------------
    @staticmethod
    def _align_feat_with_label_parallel(feat_df, label_df, min_valid_issue):
        """并行版本: 特征与标签对齐"""
        if label_df is None or label_df.empty:
            logger.error("标签数据为空, 无法执行特征对齐")
            return None

        # 处理重复idx
        if feat_df["idx"].duplicated().any():
            duplicate_idx = feat_df[feat_df["idx"].duplicated(keep=False)]["idx"].unique()
            logger.error(f"特征数据中存在{len(duplicate_idx)}个重复idx值: {duplicate_idx.tolist()}")
            feat_df = feat_df.drop_duplicates(subset=["idx"], keep="first").reset_index(drop=True)

        # 筛选标签范围内的期号
        label_idx_set = set(label_df["idx"])
        feat_df = feat_df[feat_df["idx"].isin(label_idx_set)].reset_index(drop=True)

        # 按最小有效期号筛选
        if min_valid_issue is not None:
            feat_df = feat_df[feat_df["idx"] >= min_valid_issue].reset_index(drop=True)

        return feat_df

    # ---------------------- 特征质量校验 ----------------------
    def _check_feature_quality(self, feat_df):
        """Feature quality checks: missing-value imputation, 3-sigma outlier
        reporting, and removal of one column from each highly correlated pair.

        Args:
            feat_df: merged feature matrix containing "idx"/"date" plus
                numeric feature columns.

        Returns:
            The cleaned feature DataFrame.
        """
        logger.info("="*50)
        logger.info(f"特征质量校验(模式：{self.run_type})...")

        # 1. Missing values: report, then mean-impute numeric columns
        missing_stats = feat_df.isnull().sum()
        missing_ratio = (missing_stats / len(feat_df)) * 100
        missing_df = pd.DataFrame({
            "缺失数量": missing_stats,
            "缺失比例(%)": missing_ratio.round(2)
        }).query("缺失数量 > 0")
        if not missing_df.empty:
            logger.warning(f"存在缺失值的特征:{missing_df}")
            numeric_cols = feat_df.select_dtypes(include=[np.float64, np.int64]).columns
            for col in numeric_cols:
                feat_df[col] = feat_df[col].fillna(feat_df[col].mean())

        # 2. Outlier detection (3-sigma rule); outliers are only reported, not clipped
        numeric_cols = feat_df.select_dtypes(include=[np.float64, np.int64]).columns
        outlier_stats = {}
        for col in numeric_cols:
            mean = feat_df[col].mean()
            std = feat_df[col].std()
            lower_bound = mean - 3 * std
            upper_bound = mean + 3 * std
            outlier_count = ((feat_df[col] < lower_bound) | (feat_df[col] > upper_bound)).sum()
            if outlier_count > 0:
                outlier_stats[col] = outlier_count
        if outlier_stats:
            logger.warning(f"存在异常值的特征数量: {len(outlier_stats)}, 异常值总数: {sum(outlier_stats.values())}")

        # 3. Drop one column of each highly correlated feature pair.
        # FIX: self.correlation_threshold is not set in the visible __init__,
        # which would raise AttributeError here; getattr keeps any configured
        # value while providing a conventional 0.95 fallback.
        # NOTE(review): confirm the 0.95 default against the project config.
        correlation_threshold = getattr(self, "correlation_threshold", 0.95)
        # FIX: never consider "idx" for dropping — it is the merge/alignment
        # key used downstream, and as an int64 column it lands in numeric_cols.
        corr_cols = [col for col in numeric_cols if col != "idx"]
        corr_matrix = feat_df[corr_cols].corr()
        high_corr_pairs = []
        for i in range(len(corr_matrix.columns)):
            for j in range(i+1, len(corr_matrix.columns)):
                if abs(corr_matrix.iloc[i, j]) >= correlation_threshold:
                    high_corr_pairs.append((corr_matrix.columns[i], corr_matrix.columns[j]))
        if high_corr_pairs:
            logger.warning(f"高相关特征对(≥{correlation_threshold}): {len(high_corr_pairs)}对")
            # Always drop the second member of each pair (first occurrence wins)
            drop_cols = set()
            for col1, col2 in high_corr_pairs:
                if col2 not in drop_cols:
                    drop_cols.add(col2)
            feat_df = feat_df.drop(columns=drop_cols)
            logger.info(f"剔除高相关特征: {list(drop_cols)}, 剩余特征数: {len(feat_df.columns)}")

        logger.info("特征质量校验完成")
        logger.info("="*50)
        return feat_df

    def _reduce_feature_dimension(self, feat_df):
        """Reduce feature dimensionality (train vs. predict behaviour).

        Train mode: select features by LightGBM gain importance, persist the
        selected column list, then fit and persist a StandardScaler + PCA
        pipeline capped at 89 components (so a single predict-mode row fits).

        Predict mode: reload the persisted column list / scaler / PCA and
        apply them, tolerating features that are missing at predict time.

        Args:
            feat_df: merged feature matrix with "idx"/"date" plus numeric columns.

        Returns:
            DataFrame of "idx", "date" and the reduced numeric features.
        """
        logger.info("=" * 50)
        logger.info(f"特征维度精简(模式：{self.run_type})...")

        # Separate numeric features from the bookkeeping columns
        non_numeric_cols = ["idx", "date"]
        numeric_cols = feat_df.select_dtypes(include=[np.float64, np.int64]).columns.tolist()
        numeric_feat_df = feat_df[numeric_cols].copy()

        # ---------------------- Train mode: derive and persist the reduction rules ----------------------
        if self.run_type == "train":
            # 1. LightGBM gain-importance based feature selection (train only)
            logger.info("步骤1: LightGBM特征重要性筛选(训练模式)")

            # Deduplicate, then align features with labels
            feat_df_unique = feat_df.drop_duplicates(subset=["idx"], keep="first").reset_index(drop=True)
            label_df_unique = self.label_df.drop_duplicates(subset=["idx"], keep="first").reset_index(drop=True)
            feat_df_aligned = self._align_feat_with_label(feat_df_unique)  # reuse the existing alignment helper
            common_idx = set(feat_df_aligned["idx"]) & set(label_df_unique["idx"])
            valid_sample_count = len(common_idx)
            logger.info(f"训练样本量: {valid_sample_count}, 初始特征数: {len(numeric_cols)}")

            # Only run selection when the sample is large enough (threshold is tunable)
            if valid_sample_count >= 100:
                # Merge features and labels one-to-one
                feat_df_common = feat_df_aligned[feat_df_aligned["idx"].isin(common_idx)].reset_index(drop=True)
                label_df_common = label_df_unique[label_df_unique["idx"].isin(common_idx)].reset_index(drop=True)
                feat_with_label = pd.merge(
                    feat_df_common, 
                    label_df_common, 
                    on="idx", 
                    how="inner", 
                    validate="one_to_one"
                )

                # Prepare the training split
                X = feat_with_label[numeric_cols]
                y = feat_with_label["label"]  # label column is named "label"
                X_train, X_val, y_train, y_val = train_test_split(
                    X, y, test_size=0.2, random_state=42, stratify=y  # stratify keeps the class distribution
                )

                # Train a LightGBM model purely to measure feature importance
                lgb_train = lgb.Dataset(X_train, y_train)
                lgb_val = lgb.Dataset(X_val, y_val, reference=lgb_train)
                params = {
                    "objective": "multiclass",  # SSQ prediction is multi-class
                    "num_class": 34,  # 33 reds + 1 blue (adjust to the actual labels)
                    "metric": "multi_logloss",
                    "boosting_type": "gbdt",
                    "learning_rate": 0.05,
                    "num_leaves": 31,
                    "verbose": -1,
                    "seed": 42
                }

                # Early stopping guards against overfitting
                early_stopping_callback = lgb.early_stopping(stopping_rounds=50, verbose=False)
                model = lgb.train(
                    params,
                    train_set=lgb_train,
                    valid_sets=[lgb_val],
                    num_boost_round=1000,
                    callbacks=[early_stopping_callback]
                )

                # Rank features by gain importance (more stable than split counts)
                feat_importance = pd.DataFrame({
                    "feature": numeric_cols,
                    "importance": model.feature_importance(importance_type="gain")
                }).sort_values("importance", ascending=False)

                # Keep features with non-zero importance, but never fewer than 50
                min_keep = 50
                selected_cols = feat_importance[feat_importance["importance"] > 0]["feature"].tolist()
                if len(selected_cols) < min_keep:
                    logger.warning(f"有效特征不足{min_keep}个, 强制保留top{min_keep}")
                    selected_cols = feat_importance.head(min_keep)["feature"].tolist()

                dropped_cols = [col for col in numeric_cols if col not in selected_cols]
                logger.info(f"特征筛选完成：保留{len(selected_cols)}个, 剔除{len(dropped_cols)}个")
                logger.debug(f"保留特征前10: {selected_cols[:10]}")

            else:  # sample too small: keep every feature
                logger.warning(f"训练样本量不足({valid_sample_count} < 100), 不进行特征筛选")
                selected_cols = numeric_cols

            # Persist the selected columns for reuse in predict mode
            joblib.dump(selected_cols, self.train_selected_features)
            logger.info(f"训练特征列已保存至: {self.train_selected_features}")

            # 2. PCA (cap at 89 components so the single predict-mode row fits)
            max_pca_dim = min(len(selected_cols), 89)
            logger.info(f"步骤2: PCA降维(训练模式, 最大维度限制为{max_pca_dim})")

            if len(selected_cols) > max_pca_dim:  # only reduce when above the cap
                # Standardise, then fit PCA
                scaler = StandardScaler()
                scaled_feat = scaler.fit_transform(numeric_feat_df[selected_cols])

                pca = PCA(n_components=max_pca_dim, random_state=42)
                pca_feat = pca.fit_transform(scaled_feat)

                # Persist scaler, PCA model and the dimension count
                joblib.dump(pca, self.pca_pkl)
                joblib.dump(scaler, self.scaler_pkl)
                joblib.dump(max_pca_dim, self.pca_dim_pkl)

                logger.info(f"PCA训练完成：维度{max_pca_dim}, 累计解释方差比：{pca.explained_variance_ratio_.sum():.4f}")

                pca_cols = [f"pca_feat_{i+1}" for i in range(max_pca_dim)]
                pca_feat_df = pd.DataFrame(pca_feat, columns=pca_cols, index=feat_df.index)

            else:  # at or below the cap: skip PCA, persist None markers
                logger.info(f"特征数({len(selected_cols)})≤最大维度({max_pca_dim}), 跳过PCA")
                joblib.dump(None, self.pca_pkl)
                joblib.dump(None, self.scaler_pkl)
                joblib.dump(len(selected_cols), self.pca_dim_pkl)
                pca_feat_df = numeric_feat_df[selected_cols]

        # ---------------------- Predict mode: reuse the rules saved at train time ----------------------
        else:  # self.run_type == "predict"
            # 1. Load the feature columns selected at train time
            logger.info("步骤1: 复用训练阶段筛选的特征列(预测模式)")
            if not os.path.exists(self.train_selected_features):
                logger.error("未找到训练特征筛选规则, 请先运行训练模式")
                raise FileNotFoundError(f"训练时的筛选特征和PCA规则文件{self.train_selected_features}不存在")

            selected_cols = joblib.load(self.train_selected_features)
            # Tolerate columns missing at predict time: filter instead of aborting,
            # but refuse to continue if nothing survives the filter.
            missing_cols = [col for col in selected_cols if col not in numeric_cols]
            if missing_cols:
                logger.warning(f"预测特征缺失训练必要列：{missing_cols}(共{len(missing_cols)}个)，自动过滤缺失列")
                valid_selected_cols = [col for col in selected_cols if col in numeric_cols]
                if len(valid_selected_cols) == 0:
                    logger.error("过滤后无有效特征列，无法继续预测")
                    raise ValueError("无有效特征列，预测终止")
            else:
                valid_selected_cols = selected_cols
                logger.info("预测特征列与训练阶段完全匹配，无需过滤")

            numeric_feat_df = numeric_feat_df[valid_selected_cols]
            logger.info(f"特征列对齐完成：最终保留{len(valid_selected_cols)}个有效特征")

            # 2. Reuse the scaler/PCA fitted at train time
            if not os.path.exists(self.pca_pkl) or not os.path.exists(self.scaler_pkl) or not os.path.exists(self.pca_dim_pkl):
                logger.error("未找到训练阶段的PCA/Scaler模型, 请先运行训练模式")
                raise FileNotFoundError("PCA/Scaler模型文件缺失")
            logger.info("步骤2: 复用训练阶段的PCA模型(预测模式)")
            pca = joblib.load(self.pca_pkl)
            scaler = joblib.load(self.scaler_pkl)
            max_pca_dim = joblib.load(self.pca_dim_pkl)

            if pca is not None and scaler is not None:
                # FIX: scaler.transform/pca.transform require exactly the same
                # columns (count AND order) as at fit time; the previous code
                # truncated the PCA *output* dimension, which cannot repair an
                # *input* mismatch — transform() would raise anyway. Rebuild the
                # fitted column layout instead, zero-filling columns absent at
                # predict time.
                if hasattr(scaler, "feature_names_in_"):
                    train_cols = list(scaler.feature_names_in_)
                else:
                    train_cols = list(selected_cols)
                if list(numeric_feat_df.columns) != train_cols:
                    logger.warning(f"PCA输入特征数不匹配（训练时{len(train_cols)}个，当前{len(numeric_feat_df.columns)}个），按训练列顺序补齐缺失列")
                    numeric_feat_df = numeric_feat_df.reindex(columns=train_cols, fill_value=0.0)

                scaled_feat = scaler.transform(numeric_feat_df)
                pca_feat = pca.transform(scaled_feat)
                # Keep the full component count: the downstream model was trained
                # on max_pca_dim PCA features, so truncating would break it.
                pca_cols = [f"pca_feat_{i+1}" for i in range(pca_feat.shape[1])]
                pca_feat_df = pd.DataFrame(pca_feat, columns=pca_cols, index=feat_df.index)
                logger.info(f"PCA转换完成, 预测特征维度：{pca_feat.shape[1]}（配置{max_pca_dim}）")
            else:
                # No PCA at train time: use the filtered raw features directly
                pca_feat_df = numeric_feat_df
                logger.info(f"复用原始特征, 维度：{len(valid_selected_cols)}")

        # Recombine bookkeeping columns with the reduced numeric features
        reduced_feat_df = pd.concat([feat_df[non_numeric_cols], pca_feat_df], axis=1)
        logger.info(f"特征维度精简完成({self.run_type}), 最终特征数：{len(reduced_feat_df.columns)}")
        logger.info("=" * 50)
        return reduced_feat_df

    # ---------------------- 核心: 多进程计算并融合所有特征 ----------------------
    def merge_all_features(self):
        """Compute per-window features in parallel and merge them all.

        Spawns one worker process per window, collects each window's feature
        frame, inner-merges them on "idx", appends the fractal features, then
        runs quality checks and dimensionality reduction.

        Returns:
            True on success, False on any failure (details are logged).
        """
        try:
            logger.info(f"多进程计算并融合所有特征(模式：{self.run_type})...")
            # 1. Compute each window's features in its own process
            logger.info(f"===== 启动多进程计算窗口特征: {self.windows} =====")
            # FIX: use the pool as a context manager so worker processes are
            # always reclaimed, even if task submission raises mid-loop.
            tasks = []
            with multiprocessing.Pool(processes=len(self.windows)) as pool:
                for window in self.windows:
                    # Pass copies so worker processes never share mutable frames
                    raw_df_copy = self.raw_df.copy()
                    df_01_copy = self.df_01.copy()
                    # Predict mode has no labels; send an empty frame instead of None
                    label_df_copy = self.label_df.copy() if self.label_df is not None else pd.DataFrame(columns=["idx", "label"])

                    task = pool.apply_async(
                        SSQ30PlusFeatureEngineer._calc_window_features,
                        args=(window, raw_df_copy, df_01_copy, self.red_cols_01, label_df_copy, self.min_valid_issue, self.plus30_folder,
                            self.run_type, self.predict_issue
                        )
                    )
                    tasks.append(task)

                # No further submissions; wait for every worker to finish
                pool.close()
                pool.join()

                # Collect per-window results, skipping failed tasks
                window_features = []
                for i, task in enumerate(tasks):
                    window = self.windows[i]
                    try:
                        feat_df = task.get()
                        if feat_df is not None and not feat_df.empty:
                            window_features.append(feat_df)
                            logger.info(f"成功收集窗口{window}的特征")
                        else:
                            logger.error(f"窗口{window}的特征计算失败, 跳过该窗口")
                    except Exception as e:
                        logger.error(f"获取窗口{window}特征失败: {str(e)}")

            if not window_features:
                logger.critical("所有窗口特征计算均失败, 终止流程")
                return False

            # 2. Merge the per-window feature frames on "idx"
            logger.info("===== 融合所有窗口的特征 =====")
            merged_feat = window_features[0]
            for i in range(1, len(window_features)):
                merged_feat = pd.merge(
                    merged_feat,
                    window_features[i].drop(columns=["date"], errors="ignore"),
                    on="idx",
                    how="inner"
                )
                merged_feat = merged_feat.loc[:, ~merged_feat.columns.duplicated(keep='first')]

            # 3. Append the fractal features (already split per window)
            if self.fractal_df is not None and not self.fractal_df.empty:
                merged_feat = pd.merge(
                    merged_feat,
                    self.fractal_df.drop(columns=["date"], errors="ignore"),
                    on="idx",
                    how="inner"
                )
                merged_feat = merged_feat.loc[:, ~merged_feat.columns.duplicated(keep='first')]
            logger.info(f"所有窗口特征+分形特征融合完成: {len(merged_feat)}期, {len(merged_feat.columns)}个特征")

            # 4. Feature quality checks
            merged_feat = self._check_feature_quality(merged_feat)

            # 5. Dimensionality reduction
            merged_feat = self._reduce_feature_dimension(merged_feat)

            self.final_feat_df = merged_feat
            logger.info(f"所有特征融合完成! 最终特征矩阵: {len(self.final_feat_df)}期, {len(self.final_feat_df.columns)}个特征")
            return True
        except Exception as e:
            logger.error(f"融合特征失败: {str(e)}{traceback.format_exc()}")
            return False

    # ---------------------- 保存最终特征矩阵 ----------------------
    def save_final_features(self):
        """Write the final feature matrix to CSV.

        Returns:
            True on success; False when the matrix is missing or the write fails.
        """
        try:
            matrix = self.final_feat_df
            if matrix is None:
                raise ValueError("未生成最终特征矩阵, 请先执行融合操作")
            matrix.to_csv(self.full_plus30_features, index=False, encoding="utf-8")
            logger.info(f"最终特征矩阵已保存至: {self.full_plus30_features}")
            logger.info(f"最终特征列名: {self.final_feat_df.columns.tolist()}")
        except Exception as e:
            logger.error(f"保存特征矩阵失败: {str(e)}{traceback.format_exc()}")
            return False
        return True

    # ---------------------- 主执行方法 ----------------------
    def do_30plus_features(self):
        """Run the full multi-window, multi-process feature-engineering pipeline."""
        logger.info("="*50)
        logger.info(f"开始执行多窗口多进程特征工程流程 - 模式：{self.run_type}")
        logger.info("="*50)

        # Predict mode has 4 numbered stages, train mode has 5
        total = "4" if self.run_type == "predict" else "5"
        # Ordered stages: (step number, log name, callable, failure message)
        stages = (
            ("1", "加载基础数据", self.load_basic_data, "1.加载基础数据失败!!"),
            ("2", "加载分形特征(按13/34/55窗口)", self.load_aligned_fractal_features, "2.加载分形特征失败!!"),
            ("3", "多进程计算并融合所有特征", self.merge_all_features, "3.融合所有特征失败!!"),
            ("4", "合并01序列化红球列", self.merge_01_features, "4.合并01列失败!!"),
        )
        for step_no, step_name, step_fn, fail_msg in stages:
            logger.info(f'{step_no}/{total}. {step_name}')
            if not step_fn():
                logger.critical(fail_msg)
                return False

        # Label construction only happens in train mode
        if self.run_type == "train":
            logger.info(f'5/5. 构造标签列')
            if not self.build_label_columns():
                logger.critical(f'5.构造标签列失败!!')
                return False

        # Persist the final feature matrix
        if not self.save_final_features():
            return False

        # Predict mode additionally writes out the single to-be-predicted row
        if self.run_type == "predict":
            predict_feat = self.final_feat_df[self.final_feat_df["idx"] == self.predict_issue].copy()
            predict_feat_path = os.path.join(self.data_folder, f"predict_feat_issue_{self.predict_issue}.csv")
            predict_feat.to_csv(predict_feat_path, index=False, encoding="utf-8")
            logger.info(f"待预测期{self.predict_issue}特征已单独保存至: {predict_feat_path}")

        logger.info("="*50)
        logger.info(f"多窗口多进程特征工程流程执行完成! - 模式：{self.run_type}")
        logger.info("="*50)
        return True


# ---------------------- Entry point ----------------------
if __name__ == "__main__":
    # # Ubuntu needs no Windows freeze_support(); initialize and run directly
    # engineer = SSQ30PlusFeatureEngineer()
    # success = engineer.do_30plus_features()
    # exit(0 if success else 1)

    # Run in predict mode
    # NOTE(review): predict_issue is left at its default None here, yet the
    # predict pipeline filters on idx == predict_issue downstream — a real
    # issue number is presumably required; confirm before relying on this
    # entry point as-is.
    predict_engineer = SSQ30PlusFeatureEngineer(run_type="predict")
    predict_success = predict_engineer.do_30plus_features()
    
    # Train-mode run (original logic)
    # train_engineer = SSQ30PlusFeatureEngineer(run_type="train")
    # train_success = train_engineer.do_30plus_features()
    
    exit(0 if predict_success else 1)