'''
model_utils.py	专注于模型保存、加载、序列化	依赖模型类型(XGB/LGB/PyTorch 等)	被训练类调用, 统一处理模型持久化操作
'''

import os
import json
import joblib
import traceback
import pandas as pd
import numpy as np
import xgboost as xgb
import lightgbm as lgb
import torch
from datetime import datetime
from filelock import FileLock  # 需安装：pip install filelock
from typing import Any, Optional, Tuple, List, Dict
from config.ssq_config import SSQ_CONFIG, logger
from trainer.model_train.lstm_train import LSTMModel

def save_model(
    model: Any,
    model_name: str,
    model_type: str = "sklearn",
    feature_cols: Optional[List[str]] = None,  # feature columns used during training (required in practice)
    model_params: Optional[Dict[str, Any]] = None,  # training hyperparameters
    threshold: Optional[float] = None,  # optimal classification threshold
    version: Optional[str] = None,  # training version tag
    extra_data: Optional[Dict[str, Any]] = None,  # extra payload persisted alongside the model
) -> str:
    """
    Unified model persistence: stores model weights plus the training config
    (feature columns + hyperparameters + threshold + version) for xgb/lgb/
    pytorch and the sklearn family (sklearn/lr/rf).

    :param model: trained model instance (xgb/lgb Booster, torch Module, sklearn estimator)
    :param model_name: base file name, e.g. "lr_r10_next"
    :param model_type: one of xgb/lgb/pytorch/sklearn/lr/rf (case-insensitive)
    :return: saved model path on success, "" on failure
    """
    # 1. type -> file-suffix mapping (must stay strictly in sync with load_model)
    suffix_map = {
        "xgb": ".xgb",
        "lgb": ".lgb",
        "pytorch": ".pth",
        "sklearn": ".pkl",
        "lr": ".pkl",
        "rf": ".pkl",
    }

    # Log call parameters (extra_data may be large, so it is intentionally skipped)
    logger.info(f'def save_model() parameters: ')
    logger.info(f'\tmodel_name: {model_name}')
    logger.info(f'\tmodel_type: {model_type}')
    logger.info(f'\tfeature_cols: {feature_cols}')
    logger.info(f'\tmodel_params: {model_params}')
    logger.info(f'\tthreshold: {threshold}')
    logger.info(f'\tversion: {version}')

    # Normalize type to lower case to avoid casing mismatches
    model_type = model_type.lower()
    suffix = suffix_map.get(model_type, ".pkl")
    # Build the save directory (isolated per version when a version tag is given)
    base_dir = SSQ_CONFIG['train_file']['model_save_dir']
    model_dir = os.path.join(base_dir, version) if version else base_dir
    os.makedirs(model_dir, exist_ok=True)
    # Final model path and its companion lock file
    model_path = os.path.join(model_dir, f"{model_name}{suffix}")
    lock_path = model_path + ".lock"
    # Concrete model class name (for log display only)
    model_specific_type = type(model).__name__ if model_type != "pytorch" else "PyTorch/LSTM"

    try:
        # 2. Drop a stale corrupted file (anything below 1KB is treated as corrupt)
        if os.path.exists(model_path) and os.path.getsize(model_path) < 1024:
            logger.warning(f"删除损坏的旧模型文件: {model_path}")
            os.remove(model_path)

        # 3. Multi-process file lock (prevents concurrent saves clobbering each other)
        with FileLock(lock_path):
            # Config shared by every model family
            common_config = {
                "model_type": model_type,    # model family (lr/xgb/lgb/...)
                "model_name": model_name,    # model name (e.g. lr_r10_next)
                "feature_cols": feature_cols or [],
                "model_params": model_params or {},
                "threshold": threshold,
                "version": version,
                "save_time": datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
                "extra_data": extra_data
            }

            # 3.1 XGBoost: native model file + separate params pickle (two-file layout)
            if model_type == "xgb":
                model.save_model(model_path)
                params_path = model_path.replace(".xgb", "_params.pkl")
                xgb_specific = {
                    "best_iteration": getattr(model, "best_iteration", None),
                    "best_score": getattr(model, "best_score", None),
                    "best_ntree_limit": model_params.get("best_ntree_limit") if model_params else None
                }
                joblib.dump({**common_config, **xgb_specific}, params_path, protocol=4)
                logger.info(f"XGB参数文件保存成功 | 路径: {params_path}")

            # 3.2 LightGBM: native model file + separate params pickle
            elif model_type == "lgb":
                model.save_model(model_path)
                params_path = model_path.replace(".lgb", "_params.pkl")
                lgb_specific = {
                    "best_iteration": getattr(model, "best_iteration", None)
                }
                joblib.dump({**common_config, **lgb_specific}, params_path, protocol=4)
                logger.info(f"LGB参数文件保存成功 | 路径: {params_path}")

            # 3.3 PyTorch (LSTM): single file bundling weights + config
            elif model_type == "pytorch":
                torch.save(
                    {"state_dict": model.state_dict(), **common_config},
                    model_path,
                    pickle_protocol=4,
                    _use_new_zipfile_serialization=True
                )

            # 3.4 sklearn family (lr/rf/sklearn): single file bundling model + config
            elif model_type in ["sklearn", "lr", "rf"]:
                joblib.dump({"model": model, **common_config}, model_path, protocol=4)
                # Refine the display name for the success log line
                if model_type == "lr":
                    model_specific_type = "LogisticRegression"
                elif model_type == "rf":
                    model_specific_type = "RandomForest"
                elif "lr" in model_specific_type.lower():
                    model_specific_type = "LogisticRegression"
                elif "rf" in model_specific_type.lower():
                    model_specific_type = "RandomForest"

            # Unsupported model type.
            # Fix: no traceback.format_exc() here — nothing was raised, so it
            # only ever printed "NoneType: None".
            else:
                logger.error(f"不支持的模型类型: {model_type}")
                return ""

        # 4. Post-save validation (make sure the file is complete)
        if os.path.exists(model_path) and os.path.getsize(model_path) >= 1024:
            file_size = round(os.path.getsize(model_path) / 1024, 1)
            feature_count = len(feature_cols) if feature_cols else 0
            logger.info(
                f"{model_specific_type}模型保存成功 | "
                f"路径: {model_path} | 大小: {file_size}KB | "
                f"训练特征数: {feature_count} | "
                f"版本: {version or 'unknown'}"
            )
            # Clean up the lock file
            if os.path.exists(lock_path):
                os.remove(lock_path)
            return model_path
        else:
            logger.error(f"{model_specific_type}模型保存失败：文件不完整")
            return ""

    # Degraded save path without the lock.
    # NOTE(review): filelock is imported at module top, so this handler is
    # normally unreachable — kept only in case the import is ever made lazy.
    except ImportError:
        logger.warning("未安装filelock，跳过文件锁防护(建议执行：pip install filelock)")
        common_config = {
            "model_type": model_type,
            "model_name": model_name,
            "feature_cols": feature_cols or [],
            "model_params": model_params or {},
            "threshold": threshold,
            "version": version
        }
        if model_type in ["xgb", "lgb"]:
            model.save_model(model_path)
            # Fix: use the same "<name>_params.pkl" naming that load_model expects
            # (was "<name>_<type>_params.pkl", which load_model could never find)
            params_path = model_path.replace(f".{model_type}", "_params.pkl")
            joblib.dump(common_config, params_path)
        elif model_type == "pytorch":
            torch.save({"state_dict": model.state_dict(), **common_config}, model_path)
        elif model_type in ["sklearn", "lr", "rf"]:
            joblib.dump({"model": model, **common_config}, model_path)
        return model_path if os.path.exists(model_path) else ""

    # Any other failure: log with traceback and clean up a partial file
    except Exception as e:
        logger.error(
            f"{model_specific_type}模型保存异常 | 名称: {model_name} | "
            f"错误: {str(e)}\n{traceback.format_exc()}"
        )
        if os.path.exists(model_path) and os.path.getsize(model_path) < 1024:
            os.remove(model_path)
        return ""

'''
传入 load_model() 的参数值样例：
    model_name: xgb_r33_next
    model_type: xgb
    device: cpu
    current_feature_cols: r33_next
    model_dir: /opt/coding/500w/03.ssq_fractal/data/../models/vXGB_20251029_train

def load_model(
    model_name: str,
    model_type: str = "sklearn",
    device: str = "cpu",
    current_feature_cols: Optional[List[str]] = None,
    model_dir: Optional[str] = None
) -> Tuple[Optional[Any], Optional[Dict[str, Any]]]:

'''

def load_model(
    model_name: str,
    model_type: str = "sklearn",
    device: str = "cpu",
    current_feature_cols: Optional[List[str]] = None,
    model_dir: Optional[str] = None
) -> Tuple[Optional[Any], Optional[Dict[str, Any]]]:
    """
    Unified model loading for xgb/lgb/pytorch and the sklearn family (lr/rf).

    :param model_name: base file name (without suffix), e.g. "xgb_r32_next"
    :param model_type: one of xgb/lgb/pytorch/sklearn/lr/rf (case-insensitive)
    :param device: torch map_location for pytorch models ("cpu"/"cuda")
    :param current_feature_cols: caller's feature columns; when given they are
        compared against the columns stored at training time (warning on mismatch)
    :param model_dir: directory holding the model file; defaults to the configured
        model_save_dir (fix: previously crashed on os.path.join(None, ...))
    :return: (model, model_config) on success, (None, None) on failure
    """
    logger.info(f'load_model() func parameters: ')
    logger.info(f'\t model_name: {model_name}')
    logger.info(f'\t model_type: {model_type}')
    logger.info(f'\t device: {device}')
    logger.info(f'\t current_feature_cols: {current_feature_cols}')
    logger.info(f'\t model_dir: {model_dir}')

    # 1. type -> file-suffix mapping (lr/rf are sklearn-style pickles)
    suffix_map = {
        "xgb": ".xgb",
        "lgb": ".lgb",
        "pytorch": ".pth",
        "sklearn": ".pkl",
        "lr": ".pkl",    # logistic regression is a sklearn pickle
        "rf": ".pkl"     # random forest is a sklearn pickle
    }
    model_type = model_type.lower()
    suffix = suffix_map.get(model_type, ".pkl")

    # 2. Resolve the model path, e.g.:
    #    xgb_r32_next.xgb  (model)  +  xgb_r32_next_params.pkl  (config)
    if model_dir is None:
        # Fall back to the default save directory used by save_model
        model_dir = SSQ_CONFIG['train_file']['model_save_dir']
    model_path = os.path.join(model_dir, f"{model_name}{suffix}")
    model_config = None

    # 3. Basic file checks.
    #    Fix: dropped traceback.format_exc() from all messages outside exception
    #    handlers — there it only ever printed "NoneType: None".
    logger.info(f'加载的模型路径 model_path: {model_path}')
    if not os.path.exists(model_path):
        logger.error(f"模型文件不存在 | 路径: {model_path}")
        return None, None
    if os.path.getsize(model_path) < 1024:
        logger.error(f"模型文件损坏(大小不足1KB) | 路径: {model_path}")
        return None, None

    try:
        # 4. sklearn family (incl. lr/rf): single pickle bundling model + config
        if model_type in ["sklearn", "lr", "rf"]:
            save_data = joblib.load(model_path)
            if "model" not in save_data:
                logger.error("sklearn模型实例缺失(需用新版save_model重新保存)")
                return None, None
            model = save_data["model"]
            model_config = save_data

            # Only non-LR models get the feature-set consistency check; LR is
            # validated later (by count) after its own column filtering.
            train_feature_cols = model_config["feature_cols"]
            if current_feature_cols and model_type != "lr":
                if set(train_feature_cols) != set(current_feature_cols):
                    logger.warning(
                        f"{type(model).__name__}特征列不一致 | 训练时: {len(train_feature_cols)}个 | "
                        f"当前: {len(current_feature_cols)}个")

            # Friendlier class name for the success log line
            model_specific_type = type(model).__name__
            if "lr" in model_specific_type.lower():
                model_specific_type = "LogisticRegression"
            elif "rf" in model_specific_type.lower():
                model_specific_type = "RandomForest"

            logger.info(
                f"{model_specific_type}模型加载成功 | 路径: {model_path} | "
                f"超参数: {list(model_config.get('model_params', {}).keys())} | "
                f"版本: {model_config.get('version', 'unknown')}"
            )
            return model, model_config

        # 5. XGBoost: native model file + separate params pickle
        elif model_type == "xgb":
            model = xgb.Booster(model_file=model_path)
            params_path = model_path.replace(".xgb", "_params.pkl")
            if not os.path.exists(params_path):
                logger.error(f"XGB参数文件缺失 | 路径: {params_path}")
                return None, None
            model_config = joblib.load(params_path)

            # Feature-column consistency check (warning only, load continues)
            logger.info(f'xgb-model_config: {model_config}')
            train_feature_cols = model_config["feature_cols"]
            if current_feature_cols and set(train_feature_cols) != set(current_feature_cols):
                logger.warning(
                    f"XGB特征列不一致 | 训练时: {len(train_feature_cols)}个 | "
                    f"当前: {len(current_feature_cols)}个"
                )

            logger.info(
                f"XGB模型加载成功 | 路径: {model_path} | "
                f"特征数: {model.num_features()} | "
                f"最佳树数量: {model_config['best_iteration']} | "
                f"版本: {model_config.get('version', SSQ_CONFIG['train_version'])}"
            )
            return model, model_config

        # 6. LightGBM: native model file + separate params pickle
        elif model_type == "lgb":
            model = lgb.Booster(model_file=model_path)
            params_path = model_path.replace(".lgb", "_params.pkl")
            if not os.path.exists(params_path):
                logger.error(f"LGB参数文件缺失 | 路径: {params_path}")
                return None, None
            model_config = joblib.load(params_path)

            # Feature-column consistency check (warning only, load continues)
            logger.info(f'lgb-model_config: {model_config}')
            train_feature_cols = model_config["feature_cols"]
            if current_feature_cols and set(train_feature_cols) != set(current_feature_cols):
                logger.warning(
                    f"LGB特征列不一致 | 训练时: {len(train_feature_cols)}个 | "
                    f"当前: {len(current_feature_cols)}个"
                )

            logger.info(
                f"LGB模型加载成功 | 路径: {model_path} | "
                f"特征数: {model.num_feature()} | "
                f"最佳迭代轮次: {model_config['best_iteration']} | "
                f"版本: {model_config.get('version', SSQ_CONFIG['train_version'])}"
            )
            return model, model_config

        # 7. PyTorch (LSTM): single file with state_dict + config; the network
        #    structure is rebuilt from the stored hyperparameters.
        elif model_type == "pytorch":
            save_data = torch.load(
                model_path,
                map_location=device,
                encoding="utf-8",
                weights_only=False  # full pickle: config dict travels with weights
            )
            model_config = save_data
            lstm_params = model_config.get("model_params", {})
            required_params = ["input_size", "hidden_size", "num_layers", "dropout"]
            if not all(p in lstm_params for p in required_params):
                logger.error(f"LSTM参数不完整 | 缺失: {set(required_params) - set(lstm_params.keys())}")
                return None, None

            model_struct = LSTMModel(
                input_size=lstm_params["input_size"],
                hidden_size=lstm_params["hidden_size"],
                num_layers=lstm_params["num_layers"],
                dropout=lstm_params["dropout"]
            )

            if "state_dict" not in save_data:
                logger.error("LSTM模型权重缺失")
                return None, None
            model_struct.load_state_dict(save_data["state_dict"])
            model_struct.eval()  # inference mode (disables dropout)

            # Feature-column consistency check (warning only, load continues)
            train_feature_cols = model_config["feature_cols"]
            if current_feature_cols and set(train_feature_cols) != set(current_feature_cols):
                logger.warning(
                    f"LSTM特征列不一致 | 训练时: {len(train_feature_cols)}个 | "
                    f"当前: {len(current_feature_cols)}个"
                )

            logger.info(
                f"LSTM模型加载成功 | 路径: {model_path} | "
                f"输入维度: {lstm_params['input_size']} | "
                f"设备: {device} | "
                f"版本: {model_config.get('version', SSQ_CONFIG['train_version'])}"
            )
            return model_struct, model_config

        # 8. Unsupported model type
        else:
            logger.error(f"不支持的模型类型 | {model_type}(支持: xgb/lgb/pytorch/sklearn/lr/rf)")
            return None, None

    except Exception as e:
        logger.error(
            f"模型加载异常 | 名称: {model_name} | 类型: {model_type} | "
            f"路径: {model_path} | 错误: {str(e)}\n{traceback.format_exc()}"
        )
        return None, None

def init_dataset(
    label_col: str,
    config_label_cols: List[str],
    train_path: Optional[str] = None,
    val_path: Optional[str] = None,
    test_path: Optional[str] = None,
    load_test: bool = False,
    print_sample: bool = True,
    run_type: str = 'train',
) -> Tuple[bool, Dict[str, Optional[np.ndarray]]]:
    """
    Common dataset initialisation. All returned matrices/labels are numpy
    arrays so downstream model code can share one processing path.

    :param label_col: label column to binarise (threshold = train-set mean)
    :param config_label_cols: all configured label columns (dropped from features)
    :param train_path: explicit train CSV path; falls back to config when None
        (fix: these three path parameters were previously accepted but always
        overwritten by the config-derived paths, i.e. dead parameters)
    :param val_path: explicit validation CSV path; config fallback when None
    :param test_path: explicit test CSV path; config fallback when None
    :param load_test: also load the test split
    :param print_sample: log the first 5 feature rows as a sanity check
    :param run_type: 'train' or 'predict' — selects which config section
        supplies the default data folder and file names
    :return: (ok, data_dict); X_train/X_val/X_test and y_train/y_val/y_test
        are numpy.ndarray (or None when not loaded)
    """
    data_dict = {
        "X_train": None, "X_val": None, "X_test": None,
        "y_train": None, "y_val": None, "y_test": None,
        "feature_cols": None, "threshold": None
    }
    try:
        # 1. Resolve paths: explicit arguments win; otherwise use the configured
        #    directory and file names (train vs predict layouts share key names)
        section = SSQ_CONFIG["predict_file" if run_type == "predict" else "train_file"]
        data_folder = section["DATA_FOLDER"]
        train_path = train_path or os.path.join(data_folder, section['train_data_path'])
        val_path = val_path or os.path.join(data_folder, section['val_data_path'])
        test_path = test_path or os.path.join(data_folder, section['test_data_path'])

        train_df = pd.read_csv(train_path, encoding="utf-8")
        val_df = pd.read_csv(val_path, encoding="utf-8")
        test_df = pd.read_csv(test_path, encoding="utf-8") if load_test else None

        # 2. Fill missing values with 0
        for df in [train_df, val_df]:
            if df.isnull().sum().sum() > 0:
                df.fillna(0, inplace=True)
        if load_test and test_df is not None:
            test_df.fillna(0, inplace=True)

        # 3. Feature columns = everything except ids, date and all label columns
        drop_cols = ["idx", "date"] + config_label_cols
        feature_cols = [col for col in train_df.columns if col not in drop_cols]
        data_dict["feature_cols"] = feature_cols

        # 4. Make sure the label column exists in every loaded split
        for df in [train_df, val_df]:
            if label_col not in df.columns:
                logger.error(f"[{label_col}]缺少标签列'{label_col}'")
                return False, data_dict
        if load_test and test_df is not None and label_col not in test_df.columns:
            logger.error(f"[{label_col}]测试集缺少标签列'{label_col}'")
            return False, data_dict

        # 5. Optional sample dump (first 5 rows, at most 8 feature columns)
        if print_sample:
            logger.info(f"===== [{label_col}] 训练集特征前5行(含列名)=====")
            # Take the head as a DataFrame first so column names stay available
            sample_df = train_df[feature_cols].head(5)
            logger.info(f"列名(共{len(feature_cols)}列): {list(sample_df.columns[:10])}...")
            sample_array = sample_df.to_numpy()
            for idx in range(sample_array.shape[0]):
                row_str = [f"{feature_cols[i]}: {sample_array[idx, i]:.4f}" for i in range(min(8, len(feature_cols)))]
                logger.info(f"第{idx}行: {' | '.join(row_str)}...")
            logger.info(f"===== [{label_col}] 样本打印结束 =====\n")

        # 6. Binarise labels against the train-set mean; the same threshold is
        #    reused for the validation and test splits
        raw_y_train = train_df[label_col].astype(float).values
        threshold = np.mean(raw_y_train)
        data_dict["threshold"] = threshold

        y_train = np.where(raw_y_train >= threshold, 1, 0).astype(int)
        y_val = np.where(val_df[label_col].astype(float).values >= threshold, 1, 0).astype(int)
        y_test = np.where(test_df[label_col].astype(float).values >= threshold, 1, 0).astype(int) if load_test and test_df is not None else None

        # 7. Feature matrices as float32 numpy arrays (DataFrame -> ndarray)
        X_train = train_df[feature_cols].astype(np.float32).to_numpy()
        X_val = val_df[feature_cols].astype(np.float32).to_numpy()
        X_test = test_df[feature_cols].astype(np.float32).to_numpy() if load_test and test_df is not None else None

        # 8. Populate the result dict (every X/y is a numpy array or None)
        data_dict["X_train"] = X_train
        data_dict["X_val"] = X_val
        data_dict["X_test"] = X_test
        data_dict["y_train"] = y_train
        data_dict["y_val"] = y_val
        data_dict["y_test"] = y_test

        # 9. Sanity-check the label distribution of each split
        _check_label_distribution(y_train, y_val, label_col, y_test)

        # Confirm the unified numpy types in the log
        logger.info(f"[{label_col}]数据集类型统一为numpy数组:")
        logger.info(f"  X_train: {type(data_dict['X_train'])}, 形状: {data_dict['X_train'].shape}")
        logger.info(f"  y_train: {type(data_dict['y_train'])}, 形状: {data_dict['y_train'].shape}")

        logger.info(f"[{label_col}]数据集初始化完成")
        return True, data_dict

    except Exception as e:
        logger.error(f"[{label_col}]数据集初始化失败: {str(e)}\n{traceback.format_exc()}")
        return False, data_dict
    
def _check_label_distribution(y_train: np.ndarray, y_val: np.ndarray, label_col: str, y_test: Optional[np.ndarray] = None):
    """Shared helper: log the positive-sample ratio for each split, warning
    whenever a split contains only a single label class (test split optional)."""
    # (display name, labels, tail of the single-class warning message)
    splits = [
        ("训练集", y_train, "可能导致模型失效"),
        ("验证集", y_val, "评估结果可能无效"),
    ]
    if y_test is not None:
        splits.append(("测试集", y_test, "最终评估可能无效"))

    for split_name, labels, warn_tail in splits:
        # A split with fewer than two distinct label values is degenerate —
        # no meaningful positive ratio can be reported.
        if np.unique(labels).size < 2:
            logger.warning(f"[{label_col}]{split_name}标签仅含1个类别！{warn_tail}")
        else:
            pos_ratio = np.bincount(labels)[1] / len(labels)
            logger.info(f"[{label_col}]{split_name}正样本占比: {pos_ratio:.2%}")


def convert_numpy_types(obj: Any) -> Any:
    """
    Recursively convert numpy scalar/array types to native Python types so the
    result is JSON-serialisable.

    Fix: np.bool_ is handled explicitly — it is NOT a subclass of np.integer,
    so it previously fell through unchanged and still broke json.dumps.

    :param obj: any value; dicts/lists/tuples are converted element-wise
    :return: the same structure with numpy types replaced by int/float/bool/list
    """
    if isinstance(obj, np.bool_):
        return bool(obj)
    elif isinstance(obj, np.integer):
        return int(obj)
    elif isinstance(obj, np.floating):
        return float(obj)
    elif isinstance(obj, np.ndarray):
        return obj.tolist()
    elif isinstance(obj, dict):
        return {k: convert_numpy_types(v) for k, v in obj.items()}
    elif isinstance(obj, list):
        return [convert_numpy_types(item) for item in obj]
    elif isinstance(obj, tuple):
        return tuple(convert_numpy_types(item) for item in obj)
    else:
        # Native types (and anything unknown) pass through untouched
        return obj


def save_file_atomic(data: Any, save_path: str) -> bool:
    """
    Atomically save JSON data: write to a temp file, then swap it into place so
    the target file is never left half-written.

    Fix: use os.replace instead of remove+rename — replace is a single atomic
    operation (remove+rename left a window with no file, and os.rename fails on
    Windows when the target exists). The failed-write cleanup and the chained
    RuntimeError are preserved.

    :param data: JSON-serialisable payload
    :param save_path: destination path
    :return: True on success
    :raises RuntimeError: when writing or replacing fails (original cause chained)
    """
    temp_path = f"{save_path}.tmp"
    try:
        with open(temp_path, "w", encoding="utf-8") as f:
            json.dump(data, f, indent=2, ensure_ascii=False)
        # Atomic swap: overwrites an existing target in one step on POSIX and Windows
        os.replace(temp_path, save_path)
        return True
    except Exception as e:
        # Best-effort cleanup of the partial temp file
        if os.path.exists(temp_path):
            os.remove(temp_path)
        raise RuntimeError(f"原子化保存文件失败: {str(e)}") from e


def check_version_files(version_dir: str, required_files: List[str]) -> bool:
    """
    Validate every required file under a version directory.

    A file is valid when it exists, is non-empty, and — for the two core JSON
    files — carries the expected payload:
      * *_feature_columns.json: a non-empty list of feature columns
      * *_process_info.json: non-empty feature_info.feature_columns plus
        status == "success" (training finished)

    :return: True only when all required files pass; False on the first failure.
    """
    for fname in required_files:
        file_path = os.path.join(version_dir, fname)
        logger.info(f'def check_version_files()->file_path: {file_path}')

        # 1. The file must exist
        if not os.path.exists(file_path):
            logger.warning(f"必需文件不存在: {file_path}")
            return False

        # 2. The file must be non-empty (size > 0)
        if os.path.getsize(file_path) == 0:
            logger.warning(f"文件为空，无效: {file_path}")
            return False

        # 3. Payload validation for the two core JSON files
        if fname.endswith("_feature_columns.json"):
            try:
                with open(file_path, "r", encoding="utf-8") as handle:
                    cols = json.load(handle)
                # Must be a list with at least one feature column
                if not isinstance(cols, list) or not cols:
                    logger.warning(f"feature_columns.json 无效（非列表或空列表）: {file_path}")
                    return False
            except Exception as e:
                logger.warning(f"解析 feature_columns.json 失败: {file_path} | 错误: {str(e)}")
                return False

        elif fname.endswith("_process_info.json"):
            try:
                with open(file_path, "r", encoding="utf-8") as handle:
                    process_info = json.load(handle)
                # Must contain a non-empty feature_info.feature_columns list
                cols = process_info.get("feature_info", {}).get("feature_columns", [])
                if not isinstance(cols, list) or not cols:
                    logger.warning(f"process_info.json 无效（feature_columns 为空）: {file_path}")
                    return False
                # Training must have completed successfully
                if process_info.get("status") != "success":
                    logger.warning(f"process_info.json 无效（训练未完成，status={process_info.get('status')}）: {file_path}")
                    return False
            except Exception as e:
                logger.warning(f"解析 process_info.json 失败: {file_path} | 错误: {str(e)}")
                return False

    # Every required file exists and is valid
    logger.info(f"版本目录下所有必需文件均有效: {version_dir}")
    return True