#!/usr/bin/env python3
"""
增强的双模型训练器 - LightGBM + XGBoost + 智能集成
集成SHAP可解释性分析和并行训练
"""

import pandas as pd
import numpy as np
from datetime import datetime, date, timedelta
from typing import Dict, List, Optional, Tuple, Any
import logging
import os
import pickle
import json
from pathlib import Path
import concurrent.futures
from concurrent.futures import ThreadPoolExecutor, as_completed

# 机器学习模型
import lightgbm as lgb
import xgboost as xgb
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

# SHAP可解释性分析
try:
    import shap
    SHAP_AVAILABLE = True
except ImportError:
    SHAP_AVAILABLE = False
    print("警告: SHAP未安装，将跳过可解释性分析功能")

# 数据库相关
from sqlalchemy.orm import Session
from sqlalchemy import text, and_

from backend.config.database import get_db_session
from backend.entities.load_data_new import LoadData
from backend.entities.weather_daily import WeatherDaily
from backend.entities.holiday import HolidayInfo
from backend.entities.model_training_log import ModelTrainingLog
from backend.entities.model_eval import ModelEval
from backend.entities.feat_imp import FeatImp
from backend.entities.shap_analysis import ShapAnalysis
from backend.utils.holiday_rule_engine import is_holiday

logger = logging.getLogger(__name__)

class EnhancedDualTrainer:
    """增强的双模型训练器 - 支持并行训练和SHAP分析"""
    
    def __init__(self) -> None:
        """Initialize model registries, tuned hyper-parameter configs and paths.

        Creates the on-disk directory for persisted models, sets up the
        LightGBM/XGBoost configurations, the feature-index layout used by the
        SHAP importance analysis, and the default ensemble weights.
        """
        # Registries filled in by the train_* methods (keyed by model name).
        self.models = {}
        self.scalers = {}
        self.shap_explainers = {}
        self.model_dir = Path("backend/models/saved_models")
        self.model_dir.mkdir(parents=True, exist_ok=True)
        
        # Tuned LightGBM configuration (anti-overfitting oriented).
        self.lightgbm_config = {
            'objective': 'regression',
            'metric': 'rmse',
            'boosting_type': 'gbdt',
            'num_leaves': 80,  # fewer leaves to reduce overfitting
            'learning_rate': 0.015,  # lowered learning rate
            'n_estimators': 1000,  # more trees to compensate for the low rate
            'feature_fraction': 0.7,  # lower feature sampling against overfitting
            'bagging_fraction': 0.7,  # lower bagging sampling
            'bagging_freq': 5,
            'min_child_samples': 40,  # larger minimum leaf sample count
            'min_child_weight': 0.02,  # larger minimum child weight
            'reg_alpha': 0.05,  # L1 regularization
            'reg_lambda': 0.05,  # L2 regularization
            'max_depth': 8,  # cap tree depth
            'verbosity': -1,
            'random_state': 42,
            'force_col_wise': True,  # optimize column-wise access
            'extra_trees': True,  # use extra randomization
            'n_jobs': -1  # parallel training
        }
        
        # XGBoost configuration adapted to the data volume at train time
        # (defaults below correspond to the ~2-year-data profile; see
        # adapt_xgboost_to_data_range).
        self.xgboost_config = {
            'objective': 'reg:squarederror',
            'eval_metric': 'rmse',
            'max_depth': 6,  # shallower trees, less overfitting
            'learning_rate': 0.005,  # further lowered learning rate
            'n_estimators': 1200,  # more trees to compensate for the low rate
            'subsample': 0.8,  # lower row sampling, more randomness
            'colsample_bytree': 0.8,  # lower per-tree feature sampling
            'colsample_bylevel': 0.7,  # lower per-level feature sampling
            'colsample_bynode': 0.7,  # lower per-node feature sampling
            'gamma': 0.2,  # higher split threshold, less overfitting
            'reg_alpha': 0.1,  # stronger L1 regularization
            'reg_lambda': 0.1,  # stronger L2 regularization
            'min_child_weight': 5,  # larger minimum child weight
            'max_delta_step': 0,  # no cap on weight updates
            'tree_method': 'hist',  # histogram-based tree construction
            'grow_policy': 'lossguide',  # loss-guided growth, more efficient
            'max_leaves': 64,  # cap on number of leaves
            'scale_pos_weight': 1,
            'random_state': 42,
            'verbosity': 0,
            'n_jobs': -1,
            # Additional tuning knobs.
            'monotone_constraints': None,  # allow non-monotonic relations
            'interaction_constraints': None,  # allow feature interactions
            'max_bin': 256,  # histogram bin count
            'enable_categorical': False
        }
        
        # Feature layout of the 685-dim input vector (see _prepare_daily_features).
        self.feature_indices = {
            'historical_load': list(range(0, 672)),  # historical load 0-671
            'weather': list(range(672, 676)),        # weather features 672-675
            'time': list(range(676, 685))            # calendar features 676-684
        }
        
        # Ensemble weights; recomputed dynamically after each training run.
        self.ensemble_weights = {
            'lightgbm': 0.6,
            'xgboost': 0.4
        }
        
        logger.info("增强双模型训练器初始化完成")
    
    def update_model_config(self, model_name: str, config: Dict[str, Any]):
        """Merge *config* into the named model's hyper-parameter dict.

        Args:
            model_name: Either ``'lightgbm'`` or ``'xgboost'``. Any other name
                is rejected with a warning (bug fix: previously an unknown
                name fell through both branches yet still logged success).
            config: Parameter overrides merged into the existing config.
        """
        try:
            if model_name == 'lightgbm':
                self.lightgbm_config.update(config)
                # Guarantee parallel training stays enabled.
                if 'n_jobs' not in self.lightgbm_config:
                    self.lightgbm_config['n_jobs'] = -1
            elif model_name == 'xgboost':
                self.xgboost_config.update(config)
                # Guarantee parallel training stays enabled.
                if 'n_jobs' not in self.xgboost_config:
                    self.xgboost_config['n_jobs'] = -1
            else:
                # Unknown model: warn and bail out instead of logging success.
                logger.warning(f"未知的模型名称: {model_name}，配置未更新")
                return
            
            logger.info(f"更新{model_name}配置: {config}")
            
        except Exception as e:
            logger.error(f"更新模型配置失败: {e}")
    
    def adapt_xgboost_to_data_range(self, train_range: str, sample_count: int):
        """Adapt the XGBoost hyper-parameters to the training-window size.

        Selects one of five parameter profiles — from conservative (short
        windows / few samples) to aggressive (multi-year windows) — and merges
        it into ``self.xgboost_config``.

        Args:
            train_range: Named window such as ``'3_months'`` or ``'2_years'``.
            sample_count: Number of training samples actually loaded.
        """
        try:
            logger.info(f"根据数据范围 {train_range} (样本数: {sample_count}) 调整XGBoost参数")
            
            # Ordered rule table: (window names, sample-count ceiling,
            # parameter profile, log message). First match wins.
            profile_rules = [
                (("1_month", "3_months"), 200,
                 {'max_depth': 5, 'learning_rate': 0.05, 'n_estimators': 200,
                  'reg_alpha': 0.3, 'reg_lambda': 0.3, 'gamma': 1.0},
                 "使用短期数据保守参数"),
                (("6_months", "1_year"), 500,
                 {'max_depth': 6, 'learning_rate': 0.03, 'n_estimators': 400,
                  'reg_alpha': 0.1, 'reg_lambda': 0.1, 'gamma': 0.5},
                 "使用中期数据中等参数"),
                (("2_years",), 1000,
                 {'max_depth': 8, 'learning_rate': 0.015, 'n_estimators': 600,
                  'reg_alpha': 0.05, 'reg_lambda': 0.05, 'gamma': 0.2,
                  'max_leaves': 64},
                 "使用长期数据激进参数"),
                (("3_years", "5_years"), 2000,
                 {'max_depth': 9, 'learning_rate': 0.012, 'n_estimators': 800,
                  'reg_alpha': 0.03, 'reg_lambda': 0.03, 'gamma': 0.15,
                  'max_leaves': 96, 'subsample': 0.85, 'colsample_bytree': 0.85},
                 "使用超长期数据更激进参数"),
            ]
            
            for names, ceiling, profile, message in profile_rules:
                if train_range in names or sample_count < ceiling:
                    adaptive_config, note = profile, message
                    break
            else:
                # 8+ years of data: the most aggressive profile.
                adaptive_config = {
                    'max_depth': 10, 'learning_rate': 0.01, 'n_estimators': 1000,
                    'reg_alpha': 0.01, 'reg_lambda': 0.01, 'gamma': 0.1,
                    'max_leaves': 128, 'subsample': 0.9, 'colsample_bytree': 0.9,
                }
                note = "使用超长期数据最激进参数"
            
            logger.info(note)
            self.xgboost_config.update(adaptive_config)
            logger.info(f"XGBoost参数已调整: {adaptive_config}")
            
        except Exception as e:
            logger.error(f"自适应参数调整失败: {e}")
    
    def load_data(self, train_range: str = "3_months") -> Tuple[np.ndarray, np.ndarray]:
        """Load the training matrix for the requested history window.

        Walks day by day through the window (skipping the first 7 days, which
        only serve as lag history), building one 685-dim feature vector and
        one 96-point label vector per day with complete data.

        Args:
            train_range: Named window; unrecognized values fall back to 90 days.

        Returns:
            ``(X, y)`` arrays of shape ``(n, 685)`` and ``(n, 96)``.

        Raises:
            ValueError: When no complete day could be assembled.
        """
        try:
            # Supported windows and their length in days.
            window_days = {
                "1_month": 30,
                "3_months": 90,
                "6_months": 180,
                "1_year": 365,
                "2_years": 730,
                "3_years": 1095,
                "5_years": 1825,
                "8_years": 2920,
            }
            
            end_date = date.today() - timedelta(days=1)
            if train_range in window_days:
                start_date = end_date - timedelta(days=window_days[train_range])
            else:
                logger.warning(f"未识别的数据范围: {train_range}，使用默认3个月")
                start_date = end_date - timedelta(days=90)
            
            logger.info(f"加载训练数据: {start_date} 到 {end_date}")
            
            feature_rows = []
            label_rows = []
            
            with get_db_session() as db:
                # The first 7 days only provide lag features for later days.
                day = start_date + timedelta(days=7)
                while day <= end_date:
                    vector = self._prepare_daily_features(day, db)
                    targets = self._get_daily_labels(day, db)
                    # Keep the day only when both sides are complete.
                    if vector is not None and targets is not None:
                        feature_rows.append(vector)
                        label_rows.append(targets)
                    day += timedelta(days=1)
            
            if not feature_rows:
                raise ValueError("没有找到有效的训练数据")
            
            X = np.array(feature_rows)
            y = np.array(label_rows)
            logger.info(f"数据加载完成: X.shape={X.shape}, y.shape={y.shape}")
            return X, y
            
        except Exception as e:
            logger.error(f"加载数据失败: {e}")
            raise
    
    def _prepare_daily_features(self, target_date: date, db: Session) -> Optional[np.ndarray]:
        """Build the 685-dim feature vector for a single day.

        Layout (matching ``self.feature_indices``): 672 lagged load values
        (7 previous days x 96 slots), 4 weather values, 9 calendar values.

        Returns:
            The feature vector, or None when assembly fails.
        """
        try:
            parts = []
            
            # 1) Lagged load: previous 7 days, 96 points each (672 dims).
            for offset in range(1, 8):
                loads = self._get_daily_loads(target_date - timedelta(days=offset), db)
                # Fall back to a flat average profile for missing days.
                parts.extend(loads if loads is not None else [9000.0] * 96)
            
            # 2) Weather (4 dims) with per-field defaults for missing values.
            record = db.query(WeatherDaily).filter(WeatherDaily.dt == target_date).first()
            if record:
                parts.extend([
                    record.t_max or 20.0,
                    record.t_min or 10.0,
                    record.precip or 0.0,
                    record.humidity or 60.0,
                ])
            else:
                parts.extend([20.0, 10.0, 0.0, 60.0])
            
            # 3) Calendar (9 dims): weekday one-hot, normalized month,
            #    holiday flag.
            dow = target_date.weekday()
            parts.extend(1.0 if dow == i else 0.0 for i in range(7))
            parts.append(target_date.month / 12.0)
            parts.append(1.0 if is_holiday(target_date, db) else 0.0)
            
            return np.array(parts)
            
        except Exception as e:
            logger.error(f"准备特征失败 {target_date}: {e}")
            return None
    
    def _get_daily_loads(self, target_date: date, db: Session) -> Optional[List[float]]:
        """Return the day's 96 load values ordered by time index.

        Returns:
            A list of 96 floats, or None when the day is incomplete or the
            query fails.
        """
        try:
            rows = (
                db.query(LoadData)
                .filter(LoadData.dt == target_date)
                .order_by(LoadData.t_idx)
                .all()
            )
            # Only accept complete days (96 quarter-hour slots).
            if len(rows) != 96:
                return None
            return [row.load_val for row in rows]
            
        except Exception as e:
            logger.error(f"获取负荷数据失败 {target_date}: {e}")
            return None
    
    def _get_daily_labels(self, target_date: date, db: Session) -> Optional[List[float]]:
        """Fetch the 96 target load values for *target_date*.

        Labels are identical to the raw daily load curve, so this simply
        delegates to ``_get_daily_loads``.
        """
        return self._get_daily_loads(target_date, db)
    
    def train_lightgbm(self, X_train: np.ndarray, y_train: np.ndarray,
                      X_test: np.ndarray, y_test: np.ndarray,
                      training_start_time: datetime,
                      data_range: str = "3_months") -> Dict[str, Any]:
        """Train the multi-output LightGBM model and persist it to disk.

        Args:
            X_train, y_train: Training features (n, 685) and labels (n, 96).
            X_test, y_test: Hold-out split used for evaluation metrics.
            training_start_time: Wall-clock start used to report elapsed time.
            data_range: Training-window tag stored with the DB evaluation row.
                Bug fix: this was previously hard-coded to '3_months' even for
                other training windows; the default keeps old call sites
                working unchanged.

        Returns:
            Metrics dict (mae/rmse/r2/... plus 'feature_importance'); a
            zeroed metrics dict on failure.
        """
        try:
            from sklearn.multioutput import MultiOutputRegressor
            
            logger.info("开始训练LightGBM模型...")
            logger.info(f"训练数据维度: {X_train.shape}, 标签维度: {y_train.shape}")
            
            # Standardize features; the fitted scaler is persisted with the model.
            scaler = StandardScaler()
            X_train_scaled = scaler.fit_transform(X_train)
            X_test_scaled = scaler.transform(X_test)
            
            # One LightGBM regressor per output time slot (96 total).
            base_model = lgb.LGBMRegressor(**self.lightgbm_config)
            model = MultiOutputRegressor(base_model, n_jobs=-1)
            model.fit(X_train_scaled, y_train)
            
            train_pred = model.predict(X_train_scaled)
            test_pred = model.predict(X_test_scaled)
            
            metrics = self._calculate_metrics(y_train, train_pred, y_test, test_pred)
            metrics['training_time'] = (datetime.now() - training_start_time).total_seconds()
            
            # SHAP explainability (best effort; never fails the training).
            try:
                if SHAP_AVAILABLE:
                    # NOTE(review): MultiOutputRegressor has no native SHAP
                    # support — only the first output's estimator is explained,
                    # so importance reflects time slot 0 only.
                    base_estimator = model.estimators_[0] if hasattr(model, 'estimators_') and len(model.estimators_) > 0 else model
                    explainer = shap.TreeExplainer(base_estimator)
                    # Small sample keeps memory usage low.
                    sample_size = min(10, len(X_test_scaled))
                    shap_values = explainer.shap_values(X_test_scaled[:sample_size])
                    
                    feature_importance = self._analyze_feature_importance(shap_values, 'lightgbm')
                    metrics['feature_importance'] = feature_importance
                    
                    self.shap_explainers['lightgbm'] = explainer
                    logger.info("LightGBM SHAP解释器创建成功")
                else:
                    logger.warning("SHAP未安装，跳过可解释性分析")
                    metrics['feature_importance'] = {}
                
            except Exception as e:
                logger.warning(f"LightGBM SHAP解释器创建失败: {e}")
                metrics['feature_importance'] = {}
            
            # Persist model + preprocessing artifacts, replacing any old file.
            model_data = {
                'model': model,
                'scaler': scaler,
                'config': self.lightgbm_config,
                'metrics': metrics,
                'training_time': datetime.now().isoformat(),
                'feature_indices': self.feature_indices
            }
            
            self.model_dir.mkdir(parents=True, exist_ok=True)
            
            model_path = self.model_dir / 'lightgbm_model.pkl'
            if model_path.exists():
                model_path.unlink()
                logger.info(f"删除旧的LightGBM模型文件: {model_path}")
                
            with open(model_path, 'wb') as f:
                pickle.dump(model_data, f)
            logger.info(f"✅ LightGBM模型已保存到: {model_path}")
            
            self.models['lightgbm'] = model
            self.scalers['lightgbm'] = scaler
            
            # Console summary of the run.
            logger.info(f"LightGBM训练完成")
            logger.info(f"训练用时: {metrics['training_time']:.2f}秒")
            logger.info(f"训练集 - MAE: {metrics['train_mae']:.4f}, RMSE: {metrics['train_rmse']:.4f}, R²: {metrics['train_r2']:.4f}")
            logger.info(f"测试集 - MAE: {metrics['mae']:.4f}, RMSE: {metrics['rmse']:.4f}, R²: {metrics['r2']:.4f}")
            logger.info(f"过拟合评分: {metrics['overfitting_score']:.4f}")
            
            logger.info("-" * 40)
            logger.info("🔧 LightGBM 当前参数配置:")
            for key, value in self.lightgbm_config.items():
                logger.info(f"   {key}: {value}")
            logger.info("-" * 40)
            
            # Persist the evaluation row using the caller-supplied window tag
            # (previously hard-coded to '3_months').
            self.save_model_eval_to_db(
                model_name='lightgbm',
                metrics=metrics,
                train_samples=len(X_train),
                test_samples=len(X_test),
                features_count=X_train.shape[1],
                training_time=metrics.get('training_time', 0.0),
                data_range=data_range
            )
            
            return metrics
            
        except Exception as e:
            logger.error(f"LightGBM训练失败: {e}")
            return {'mae': 0.0, 'rmse': 0.0, 'r2': 0.0, 'mape': 0.0}
    
    def train_xgboost(self, X_train: np.ndarray, y_train: np.ndarray,
                     X_test: np.ndarray, y_test: np.ndarray,
                     training_start_time: datetime,
                     data_range: str = "3_months") -> Dict[str, Any]:
        """Train the multi-output XGBoost model and persist it to disk.

        Mirrors ``train_lightgbm`` but optionally applies SelectKBest feature
        selection when the feature count exceeds 800.

        Args:
            X_train, y_train: Training features (n, 685) and labels (n, 96).
            X_test, y_test: Hold-out split used for evaluation metrics.
            training_start_time: Wall-clock start used to report elapsed time.
            data_range: Training-window tag stored with the DB evaluation row.
                Bug fix: this was previously hard-coded to '3_months' even for
                other training windows; the default keeps old call sites
                working unchanged.

        Returns:
            Metrics dict; a zeroed metrics dict on failure.
        """
        try:
            from sklearn.multioutput import MultiOutputRegressor
            
            logger.info("开始训练XGBoost模型...")
            logger.info(f"训练数据维度: {X_train.shape}, 标签维度: {y_train.shape}")
            
            # Preprocessing mirrors the LightGBM pipeline where possible.
            from sklearn.preprocessing import StandardScaler
            from sklearn.feature_selection import SelectKBest, f_regression
            
            # Only select features when there are very many of them.
            use_feature_selection = X_train.shape[1] > 800
            
            if use_feature_selection:
                # Keep a generous number of features to limit information loss.
                n_features = min(650, X_train.shape[1])
                selector = SelectKBest(score_func=f_regression, k=n_features)
                # f_regression needs a single target, so score against the
                # per-sample mean of the 96 outputs.
                X_train_selected = selector.fit_transform(X_train, y_train.mean(axis=1))
                X_test_selected = selector.transform(X_test)
                logger.info(f"使用特征选择：{X_train.shape[1]} -> {n_features}")
            else:
                X_train_selected = X_train
                X_test_selected = X_test
                selector = None
                logger.info(f"不使用特征选择，保留所有{X_train.shape[1]}个特征")
            
            # Standardize (StandardScaler, consistent with LightGBM).
            scaler = StandardScaler()
            X_train_scaled = scaler.fit_transform(X_train_selected)
            X_test_scaled = scaler.transform(X_test_selected)
            
            # One XGBoost regressor per output time slot (96 total).
            base_model = xgb.XGBRegressor(**self.xgboost_config)
            model = MultiOutputRegressor(base_model, n_jobs=-1)
            model.fit(X_train_scaled, y_train)
            
            train_pred = model.predict(X_train_scaled)
            test_pred = model.predict(X_test_scaled)
            
            metrics = self._calculate_metrics(y_train, train_pred, y_test, test_pred)
            metrics['training_time'] = (datetime.now() - training_start_time).total_seconds()
            
            # SHAP explainability (best effort; never fails the training).
            try:
                if SHAP_AVAILABLE:
                    # NOTE(review): only the first output's estimator is
                    # explained (slot 0) — MultiOutputRegressor has no native
                    # SHAP support.
                    base_estimator = model.estimators_[0] if hasattr(model, 'estimators_') and len(model.estimators_) > 0 else model
                    explainer = shap.TreeExplainer(base_estimator)
                    # Small sample keeps memory usage low; SHAP must see the
                    # same preprocessed representation the model was fit on.
                    sample_size = min(10, len(X_test))
                    shap_values = explainer.shap_values(X_test_scaled[:sample_size])
                    
                    try:
                        if use_feature_selection and selector is not None:
                            # Map SHAP columns back to original feature indices.
                            selected_feature_indices = self._create_selected_feature_indices(selector, len(X_train_selected[0]))
                            feature_importance = self._analyze_feature_importance_selected(shap_values, 'xgboost', selected_feature_indices)
                        else:
                            # Full feature set: same analysis path as LightGBM.
                            feature_importance = self._analyze_feature_importance(shap_values, 'xgboost')
                        metrics['feature_importance'] = feature_importance
                    except Exception as e:
                        logger.error(f"XGBoost特征重要性分析失败: {e}")
                        metrics['feature_importance'] = {}
                    
                    self.shap_explainers['xgboost'] = explainer
                    logger.info("XGBoost SHAP解释器创建成功")
                else:
                    logger.warning("SHAP未安装，跳过可解释性分析")
                    metrics['feature_importance'] = {}
                
            except Exception as e:
                logger.warning(f"XGBoost SHAP解释器创建失败: {e}")
                metrics['feature_importance'] = {}
            
            # Persist model + preprocessing artifacts, replacing any old file.
            model_data = {
                'model': model,
                'selector': selector,  # may be None when selection is disabled
                'scaler': scaler,
                'selected_features': len(X_train_selected[0]),  # actual feature count
                'config': self.xgboost_config,
                'metrics': metrics,
                'training_time': datetime.now().isoformat(),
                'feature_indices': self.feature_indices
            }
            
            self.model_dir.mkdir(parents=True, exist_ok=True)
            
            model_path = self.model_dir / 'xgboost_model.pkl'
            if model_path.exists():
                model_path.unlink()
                logger.info(f"删除旧的XGBoost模型文件: {model_path}")
                
            with open(model_path, 'wb') as f:
                pickle.dump(model_data, f)
            logger.info(f"✅ XGBoost模型已保存到: {model_path}")
            
            self.models['xgboost'] = model
            self.scalers['xgboost'] = {'selector': selector, 'scaler': scaler}
            
            # Console summary of the run.
            logger.info(f"XGBoost训练完成")
            logger.info(f"训练用时: {metrics['training_time']:.2f}秒")
            logger.info(f"训练集 - MAE: {metrics['train_mae']:.4f}, RMSE: {metrics['train_rmse']:.4f}, R²: {metrics['train_r2']:.4f}")
            logger.info(f"测试集 - MAE: {metrics['mae']:.4f}, RMSE: {metrics['rmse']:.4f}, R²: {metrics['r2']:.4f}")
            logger.info(f"过拟合评分: {metrics['overfitting_score']:.4f}")
            
            logger.info("-" * 40)
            logger.info("🔧 XGBoost 当前参数配置:")
            for key, value in self.xgboost_config.items():
                logger.info(f"   {key}: {value}")
            logger.info("-" * 40)
            
            # Persist the evaluation row using the caller-supplied window tag
            # (previously hard-coded to '3_months').
            self.save_model_eval_to_db(
                model_name='xgboost',
                metrics=metrics,
                train_samples=len(X_train),
                test_samples=len(X_test),
                features_count=X_train.shape[1],
                training_time=metrics.get('training_time', 0.0),
                data_range=data_range
            )
            
            return metrics
            
        except Exception as e:
            logger.error(f"XGBoost训练失败: {e}")
            return {'mae': 0.0, 'rmse': 0.0, 'r2': 0.0, 'mape': 0.0}
    
    def train_all_models_parallel(self, train_range: str = "3_months") -> Dict[str, Dict[str, Any]]:
        """Train LightGBM and XGBoost in parallel and build the ensemble.

        Loads the data window, adapts the XGBoost config to its size, trains
        both models on worker threads, computes dynamic ensemble weights,
        persists the ensemble config to disk and writes all evaluation rows
        to the database.

        Args:
            train_range: Named training window passed to ``load_data``.

        Returns:
            Per-model metrics dict (keys 'lightgbm', 'xgboost' and, on
            success, 'ensemble'); empty dict on failure.
        """
        try:
            logger.info("=" * 80)
            logger.info("🚀 增强双模型并行训练开始")
            logger.info("=" * 80)
            logger.info(f"📅 训练数据范围: {train_range}")
            logger.info(f"🎯 训练模型: LightGBM + XGBoost")
            logger.info(f"⚡ 并行训练: 启用")
            logger.info(f"🔍 SHAP分析: 启用")
            logger.info("-" * 50)
            logger.info("📊 LightGBM 配置:")
            for key, value in self.lightgbm_config.items():
                logger.info(f"   {key}: {value}")
            logger.info("-" * 50)
            logger.info("📊 XGBoost 配置:")
            for key, value in self.xgboost_config.items():
                logger.info(f"   {key}: {value}")
            logger.info("=" * 80)
            
            # Load the data for the requested window.
            X, y = self.load_data(train_range)
            
            # Adapt XGBoost parameters to the actual data volume.
            self.adapt_xgboost_to_data_range(train_range, len(X))
            
            # NOTE(review): a shuffled random split on daily time-series data
            # can leak adjacent days between train and test — confirm this is
            # intended before relying on the test metrics.
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=0.2, random_state=42
            )
            
            logger.info(f"训练集: {X_train.shape}, 测试集: {X_test.shape}")
            
            results = {}
            training_start_time = datetime.now()
            
            # Train both models concurrently on a small thread pool.
            with ThreadPoolExecutor(max_workers=2) as executor:
                future_to_model = {
                    executor.submit(self.train_lightgbm, X_train, y_train, X_test, y_test, training_start_time): 'lightgbm',
                    executor.submit(self.train_xgboost, X_train, y_train, X_test, y_test, training_start_time): 'xgboost'
                }
                
                for future in as_completed(future_to_model):
                    model_name = future_to_model[future]
                    try:
                        result = future.result()
                        results[model_name] = result
                        logger.info(f"{model_name}训练完成")
                    except Exception as e:
                        logger.error(f"{model_name}训练异常: {e}")
                        # Zeroed fallback so downstream success checks skip it.
                        results[model_name] = {'mae': 0.0, 'rmse': 0.0, 'r2': 0.0, 'mape': 0.0}
            
            # Build the ensemble only when every model actually succeeded.
            # Bug fix: the old check `all(results.values())` was always True
            # because even the zeroed failure fallback dict is truthy; gate on
            # a positive MAE instead, matching the success check used below.
            if results and all(m.get('mae', 0) > 0 for m in results.values()):
                dynamic_weights = self._calculate_dynamic_weights(results)
                self.ensemble_weights = dynamic_weights
                logger.info(f"动态集成权重: {dynamic_weights}")
                
                # Weighted combination of the base models' metrics.
                ensemble_result = {
                    'mae': sum(results[model]['mae'] * weight for model, weight in dynamic_weights.items()),
                    'rmse': sum(results[model]['rmse'] * weight for model, weight in dynamic_weights.items()),
                    'r2': sum(results[model]['r2'] * weight for model, weight in dynamic_weights.items()),
                    'mape': sum(results[model]['mape'] * weight for model, weight in dynamic_weights.items()),
                    'weights': dynamic_weights,
                    'training_time': sum(results[model].get('training_time', 0) for model in results.keys())
                }
                results['ensemble'] = ensemble_result
                logger.info(f"集成模型配置完成: MAE={ensemble_result['mae']:.4f}, "
                           f"RMSE={ensemble_result['rmse']:.4f}, R²={ensemble_result['r2']:.4f}")
            
            # Persist the ensemble config (JSON-serializable metrics only).
            json_safe_metrics = {}
            for model_name, metrics in results.items():
                if isinstance(metrics, dict):
                    json_safe_metrics[model_name] = {
                        'mae': float(metrics.get('mae', 0)),
                        'rmse': float(metrics.get('rmse', 0)),
                        'r2': float(metrics.get('r2', 0)),
                        'mape': float(metrics.get('mape', 0)),
                        'training_time': float(metrics.get('training_time', 0))
                    }
            
            ensemble_config = {
                'weights': self.ensemble_weights,
                'models': list(results.keys()),
                'training_time': datetime.now().isoformat(),
                'data_range': train_range,
                'performance_metrics': json_safe_metrics
            }
            
            self.model_dir.mkdir(parents=True, exist_ok=True)
            
            config_path = self.model_dir / 'ensemble_config.json'
            # Replace any stale config file.
            if config_path.exists():
                config_path.unlink()
                logger.info(f"删除旧的集成配置文件: {config_path}")
                
            with open(config_path, 'w') as f:
                json.dump(ensemble_config, f, indent=2)
            logger.info(f"✅ 集成配置已保存到: {config_path}")
            
            # Persist per-model records to the database.
            for model_name, metrics in results.items():
                if metrics.get('mae', 0) > 0:
                    self._save_training_log(model_name, metrics, train_range, training_start_time)
                    
                    # Base models already persisted their evaluation rows in
                    # their own training functions; only the ensemble row is
                    # written here.
                    if model_name == 'ensemble':
                        self.save_model_eval_to_db(
                            model_name='ensemble',
                            metrics=metrics,
                            train_samples=len(X_train),
                            test_samples=len(X_test),
                            features_count=X_train.shape[1],
                            training_time=metrics.get('training_time', 0.0),
                            data_range=train_range
                        )
                    
                    # Feature importance / SHAP rows for base models only
                    # (the ensemble has no importance of its own).
                    if 'feature_importance' in metrics and model_name != 'ensemble':
                        self._save_feature_importance(model_name, metrics['feature_importance'])
                        self._save_shap_analysis(model_name, metrics['feature_importance'])
                    
                    # Ensemble-specific rows: weights and weighted SHAP.
                    if model_name == 'ensemble' and 'weights' in metrics:
                        self._save_ensemble_weights(model_name, metrics['weights'])
                        self._save_ensemble_shap_analysis(model_name, metrics['weights'], results)
            
            total_time = (datetime.now() - training_start_time).total_seconds()
            logger.info("=" * 60)
            logger.info("增强双模型并行训练完成!")
            logger.info(f"总训练耗时: {total_time:.2f}秒")
            
            # Detailed per-model summary.
            for model_name, metrics in results.items():
                if 'error' not in metrics and metrics.get('mae', 0) > 0:
                    logger.info(f"{model_name.upper()}: MAE={metrics['mae']:.4f}, "
                               f"RMSE={metrics['rmse']:.4f}, R²={metrics['r2']:.4f}")
            
            # Dump the final parameter configs to aid manual tuning.
            logger.info("=" * 80)
            logger.info("📊 当前模型参数配置 (用于调整):")
            logger.info("=" * 80)
            logger.info("🔧 LightGBM 参数配置:")
            for key, value in self.lightgbm_config.items():
                logger.info(f"   {key}: {value}")
            logger.info("-" * 50)
            logger.info("🔧 XGBoost 参数配置:")
            for key, value in self.xgboost_config.items():
                logger.info(f"   {key}: {value}")
            logger.info("-" * 50)
            logger.info("🔧 集成权重配置:")
            for model, weight in self.ensemble_weights.items():
                logger.info(f"   {model}: {weight:.3f}")
            logger.info("=" * 80)
            logger.info("💡 参数调整建议:")
            logger.info("   - 如果过拟合严重，降低 max_depth, learning_rate, 增加正则化")
            logger.info("   - 如果欠拟合，增加 max_depth, n_estimators, 降低正则化")
            logger.info("   - 如果训练时间过长，减少 n_estimators, 增加 learning_rate")
            logger.info("   - 如果精度不够，增加 n_estimators, 降低 learning_rate")
            logger.info("=" * 80)
            
            return results
            
        except Exception as e:
            logger.error(f"并行训练失败: {e}")
            return {}
    
    def _analyze_feature_importance(self, shap_values: np.ndarray, model_name: str) -> Dict[str, Any]:
        """分析特征重要性"""
        try:
            # 计算平均SHAP值
            mean_shap = np.mean(np.abs(shap_values), axis=0)
            
            # 按特征类别分组
            feature_importance = {
                'raw_shap_values': shap_values  # 保存原始SHAP值以供后续使用
            }
            
            # 历史负荷特征重要性
            historical_importance = mean_shap[self.feature_indices['historical_load']]
            feature_importance['historical_load'] = {
                'total_importance': float(np.sum(historical_importance)),
                'average_importance': float(np.mean(historical_importance)),
                'max_importance': float(np.max(historical_importance)),
                'contribution_ratio': float(np.sum(historical_importance) / np.sum(mean_shap))
            }
            
            # 气象特征重要性
            weather_importance = mean_shap[self.feature_indices['weather']]
            feature_importance['weather'] = {
                'temperature_max_importance': float(weather_importance[0]),
                'temperature_min_importance': float(weather_importance[1]),
                'precipitation_importance': float(weather_importance[2]),
                'humidity_importance': float(weather_importance[3]),
                'total_importance': float(np.sum(weather_importance)),
                'contribution_ratio': float(np.sum(weather_importance) / np.sum(mean_shap))
            }
            
            # 时间特征重要性
            time_importance = mean_shap[self.feature_indices['time']]
            feature_importance['time'] = {
                'weekday_importance': float(np.sum(time_importance[:7])),
                'month_importance': float(time_importance[7]),
                'holiday_importance': float(time_importance[8]),
                'total_importance': float(np.sum(time_importance)),
                'contribution_ratio': float(np.sum(time_importance) / np.sum(mean_shap))
            }
            
            # 生成可解释性分析
            interpretability_analysis = self._generate_interpretability_analysis(feature_importance)
            feature_importance['interpretability_analysis'] = interpretability_analysis
            
            return feature_importance
            
        except Exception as e:
            logger.error(f"特征重要性分析失败: {e}")
            return {}
    
    def _create_selected_feature_indices(self, selector, n_features: int) -> Dict[str, List[int]]:
        """创建特征选择后的索引映射"""
        try:
            # 获取被选择的特征索引
            selected_mask = selector.get_support()
            selected_original_indices = np.where(selected_mask)[0]
            
            # 创建新的索引映射
            new_indices = {}
            current_idx = 0
            
            for original_idx in selected_original_indices:
                if original_idx < 672:  # 历史负荷特征
                    if 'historical_load' not in new_indices:
                        new_indices['historical_load'] = []
                    new_indices['historical_load'].append(current_idx)
                elif 672 <= original_idx < 676:  # 气象特征
                    if 'weather' not in new_indices:
                        new_indices['weather'] = []
                    new_indices['weather'].append(current_idx)
                elif 676 <= original_idx < 685:  # 时间特征
                    if 'time' not in new_indices:
                        new_indices['time'] = []
                    new_indices['time'].append(current_idx)
                current_idx += 1
                
            return new_indices
        except Exception as e:
            logger.error(f"创建特征索引映射失败: {e}")
            return {'historical_load': [], 'weather': [], 'time': []}
    
    def _analyze_feature_importance_selected(self, shap_values: np.ndarray, model_name: str, 
                                           selected_indices: Dict[str, List[int]]) -> Dict[str, Any]:
        """Analyze per-category feature importance after feature selection.

        Like ``_analyze_feature_importance`` but works on the compacted
        post-selection feature space described by ``selected_indices``
        (output of ``_create_selected_feature_indices``). Each category is
        handled defensively: missing categories or out-of-range indices fall
        back to zero-filled default structures, and categories with fewer
        surviving features than the full layout get their sub-fields
        estimated (evenly split for weather, 60/30/10 split for time).

        Args:
            shap_values: SHAP output; may be a list (per-output arrays) or an
                ndarray of 2 or more dimensions.
            model_name: Model identifier (used only for logging context).
            selected_indices: Category -> new-index lists in the compacted
                feature space.

        Returns:
            Dict with 'historical_load', 'weather', 'time' importance stats,
            'raw_shap_values' and 'interpretability_analysis'. On failure,
            a zero-filled default structure is returned instead of {} so
            downstream persistence never sees missing keys.
        """
        try:
            # SHAP can return a list of arrays (one per model output);
            # regression uses the first.
            if isinstance(shap_values, list):
                shap_values = shap_values[0]  # take the first output for regression
            
            # Collapse to a 1-D per-feature mean |SHAP| vector; extra trailing
            # dimensions (if any) are averaged away in a second pass.
            if len(shap_values.shape) > 2:
                mean_shap = np.mean(np.abs(shap_values), axis=0)
                if len(mean_shap.shape) > 1:
                    mean_shap = np.mean(mean_shap, axis=1)
            else:
                mean_shap = np.mean(np.abs(shap_values), axis=0)
            
            logger.info(f"SHAP值维度: {shap_values.shape}, 平均SHAP维度: {mean_shap.shape}")
            
            # Keep the raw matrix so downstream persistence can reuse it.
            feature_importance = {
                'raw_shap_values': shap_values
            }
            
            # --- Historical-load features ---
            if selected_indices.get('historical_load'):
                hist_indices = selected_indices['historical_load']
                # Guard against stale index maps pointing past the SHAP vector.
                if all(idx < len(mean_shap) for idx in hist_indices):
                    historical_importance = mean_shap[hist_indices]
                    feature_importance['historical_load'] = {
                        'total_importance': float(np.sum(historical_importance)),
                        'average_importance': float(np.mean(historical_importance)),
                        'max_importance': float(np.max(historical_importance)),
                        'contribution_ratio': float(np.sum(historical_importance) / np.sum(mean_shap))
                    }
                else:
                    logger.warning(f"历史负荷特征索引超出范围: {max(hist_indices)} >= {len(mean_shap)}")
                    feature_importance['historical_load'] = {'contribution_ratio': 0.0}
            else:
                feature_importance['historical_load'] = {'contribution_ratio': 0.0}
            
            # --- Weather features ---
            if selected_indices.get('weather'):
                weather_indices = selected_indices['weather']
                if all(idx < len(mean_shap) for idx in weather_indices):
                    weather_importance = mean_shap[weather_indices]
                    # Always emit the full weather structure, even when only
                    # some weather features survived selection.
                    weather_data = {
                        'total_importance': float(np.sum(weather_importance)),
                        'contribution_ratio': float(np.sum(weather_importance) / np.sum(mean_shap))
                    }
                    # With all 4 weather features present, break them out
                    # positionally: [temp_max, temp_min, precip, humidity].
                    if len(weather_importance) >= 4:
                        weather_data.update({
                            'temperature_max_importance': float(weather_importance[0]),
                            'temperature_min_importance': float(weather_importance[1]),
                            'precipitation_importance': float(weather_importance[2]),
                            'humidity_importance': float(weather_importance[3])
                        })
                    else:
                        # Fewer than 4 survived: spread the mean evenly (or 0).
                        avg_importance = float(np.mean(weather_importance)) if len(weather_importance) > 0 else 0.0
                        weather_data.update({
                            'temperature_max_importance': avg_importance,
                            'temperature_min_importance': avg_importance,
                            'precipitation_importance': avg_importance,
                            'humidity_importance': avg_importance
                        })
                    feature_importance['weather'] = weather_data
                else:
                    logger.warning(f"气象特征索引超出范围: {max(weather_indices)} >= {len(mean_shap)}")
                    feature_importance['weather'] = self._get_default_weather_importance()
            else:
                feature_importance['weather'] = self._get_default_weather_importance()
            
            # --- Time features ---
            if selected_indices.get('time'):
                time_indices = selected_indices['time']
                if all(idx < len(mean_shap) for idx in time_indices):
                    time_importance = mean_shap[time_indices]
                    # Always emit the full time structure.
                    time_data = {
                        'total_importance': float(np.sum(time_importance)),
                        'contribution_ratio': float(np.sum(time_importance) / np.sum(mean_shap))
                    }
                    # With all 9 time features present, break them out:
                    # 7 weekday one-hots + month + holiday flag.
                    if len(time_importance) >= 9:
                        time_data.update({
                            'weekday_importance': float(np.sum(time_importance[:7])),  # first 7 are weekdays
                            'month_importance': float(time_importance[7]),  # 8th is month
                            'holiday_importance': float(time_importance[8])  # 9th is holiday flag
                        })
                    else:
                        # Fewer survived: estimate with a fixed 60/30/10 split.
                        total_time_imp = float(np.sum(time_importance))
                        time_data.update({
                            'weekday_importance': total_time_imp * 0.6,
                            'month_importance': total_time_imp * 0.3,
                            'holiday_importance': total_time_imp * 0.1
                        })
                    feature_importance['time'] = time_data
                else:
                    logger.warning(f"时间特征索引超出范围: {max(time_indices)} >= {len(mean_shap)}")
                    feature_importance['time'] = self._get_default_time_importance()
            else:
                feature_importance['time'] = self._get_default_time_importance()
            
            # Attach the textual interpretation of the ratios above.
            interpretability_analysis = self._generate_interpretability_analysis(feature_importance)
            feature_importance['interpretability_analysis'] = interpretability_analysis
            
            return feature_importance
            
        except Exception as e:
            logger.error(f"特征重要性分析失败: {e}")
            import traceback
            traceback.print_exc()
            # Return a zero-filled default structure rather than an empty dict
            # so downstream code can still read the expected keys.
            return {
                'historical_load': {'contribution_ratio': 0.0},
                'weather': self._get_default_weather_importance(),
                'time': self._get_default_time_importance(),
                'interpretability_analysis': {}
            }
    
    def _get_default_weather_importance(self) -> Dict[str, float]:
        """获取默认的气象特征重要性结构"""
        return {
            'contribution_ratio': 0.0,
            'temperature_max_importance': 0.0,
            'temperature_min_importance': 0.0,
            'precipitation_importance': 0.0,
            'humidity_importance': 0.0,
            'total_importance': 0.0
        }
    
    def _get_default_time_importance(self) -> Dict[str, float]:
        """获取默认的时间特征重要性结构"""
        return {
            'contribution_ratio': 0.0,
            'weekday_importance': 0.0,
            'month_importance': 0.0,
            'holiday_importance': 0.0,
            'total_importance': 0.0
        }
    
    def _generate_interpretability_analysis(self, feature_importance: Dict[str, Any]) -> Dict[str, str]:
        """生成可解释性分析"""
        try:
            analysis = {}
            
            # 历史负荷影响分析
            hist_ratio = feature_importance['historical_load']['contribution_ratio']
            if hist_ratio > 0.7:
                analysis['historical_load_impact'] = "历史负荷是最主要的预测因素，模型主要依赖历史负荷模式"
            elif hist_ratio > 0.5:
                analysis['historical_load_impact'] = "历史负荷是重要的预测因素，与其他因素共同影响预测"
            else:
                analysis['historical_load_impact'] = "历史负荷影响相对较小，其他因素更为重要"
            
            # 气象因素影响分析
            weather_ratio = feature_importance['weather']['contribution_ratio']
            weather_detail = feature_importance['weather']
            
            if weather_ratio > 0.2:
                analysis['weather_impact'] = "气象因素对负荷预测有显著影响"
                
                # 详细分析各气象因素
                temp_importance = weather_detail['temperature_max_importance'] + weather_detail['temperature_min_importance']
                precip_importance = weather_detail['precipitation_importance']
                humid_importance = weather_detail['humidity_importance']
                
                if temp_importance > max(precip_importance, humid_importance):
                    analysis['weather_detail'] = "温度是最重要的气象因素"
                elif precip_importance > humid_importance:
                    analysis['weather_detail'] = "降水对负荷预测影响最大"
                else:
                    analysis['weather_detail'] = "湿度是主要的气象影响因素"
            else:
                analysis['weather_impact'] = "气象因素对负荷预测影响较小"
            
            # 时间因素影响分析
            time_ratio = feature_importance['time']['contribution_ratio']
            time_detail = feature_importance['time']
            
            if time_ratio > 0.1:
                analysis['time_impact'] = "时间因素对负荷预测有明显影响"
                
                if time_detail['weekday_importance'] > time_detail['month_importance']:
                    analysis['time_detail'] = "工作日模式比季节性模式更重要"
                else:
                    analysis['time_detail'] = "季节性模式比工作日模式更重要"
                
                if time_detail['holiday_importance'] > 0.05:
                    analysis['holiday_impact'] = "节假日对负荷预测有显著影响"
                else:
                    analysis['holiday_impact'] = "节假日影响相对较小"
            else:
                analysis['time_impact'] = "时间因素对负荷预测影响较小"
            
            return analysis
            
        except Exception as e:
            logger.error(f"生成可解释性分析失败: {e}")
            return {}
    
    def _calculate_dynamic_weights(self, results: Dict[str, Dict[str, Any]]) -> Dict[str, float]:
        """基于性能动态计算集成权重"""
        try:
            weights = {}
            total_score = 0
            
            for model_name, metrics in results.items():
                if metrics.get('mae', 0) > 0:
                    # 使用MAE的倒数作为权重基础
                    score = 1.0 / (metrics['mae'] + 1e-8)
                    weights[model_name] = score
                    total_score += score
                else:
                    weights[model_name] = 0
            
            # 归一化权重
            if total_score > 0:
                for model_name in weights:
                    weights[model_name] /= total_score
            else:
                # 如果都失败，使用默认权重
                weights = self.ensemble_weights
            
            return weights
            
        except Exception as e:
            logger.error(f"计算动态权重失败: {e}")
            return self.ensemble_weights
    
    def _calculate_metrics(self, y_train: np.ndarray, train_pred: np.ndarray,
                          y_test: np.ndarray, test_pred: np.ndarray) -> Dict[str, float]:
        """Compute train/test regression metrics plus an overfitting score.

        MAPE uses a 1e-8 epsilon in the denominator to avoid dividing by
        zero on zero-valued targets. The overfitting score is the relative
        gap between train and test MAE (0.0 when test MAE is non-positive).

        Returns:
            Dict of float metrics; a minimal zero-filled dict on failure.
        """
        try:
            # Flatten once and reuse: (actual, predicted) pairs per split.
            yt, pt = y_test.flatten(), test_pred.flatten()
            ytr, ptr = y_train.flatten(), train_pred.flatten()

            def _mape(actual, predicted):
                # Epsilon keeps zero targets from blowing up the ratio.
                return np.mean(np.abs((actual - predicted) / (actual + 1e-8))) * 100

            metrics = {
                'mae': float(mean_absolute_error(yt, pt)),
                'rmse': float(np.sqrt(mean_squared_error(yt, pt))),
                'r2': float(r2_score(yt, pt)),
                'mape': float(_mape(yt, pt)),
                'train_mae': float(mean_absolute_error(ytr, ptr)),
                'train_rmse': float(np.sqrt(mean_squared_error(ytr, ptr))),
                'train_r2': float(r2_score(ytr, ptr)),
                'train_mape': float(_mape(ytr, ptr)),
            }

            # Relative train/test MAE gap; larger means stronger overfitting.
            test_mae = metrics['mae']
            metrics['overfitting_score'] = float(
                abs(metrics['train_mae'] - test_mae) / test_mae if test_mae > 0 else 0.0
            )
            return metrics

        except Exception as e:
            logger.error(f"计算指标失败: {e}")
            return {'mae': 0.0, 'rmse': 0.0, 'r2': 0.0, 'mape': 0.0}
    
    def _save_training_log(self, model_name: str, metrics: Dict[str, float],
                          train_range: str, training_start_time: datetime):
        """Persist a training log entry and a model evaluation row for one model.

        Upserts into two tables:
          * ModelTrainingLog -- keyed by model name + training start date
            (one record per model per day; a re-run on the same day updates it).
          * ModelEval -- prefers today's record, otherwise updates the most
            recent record for this model, else inserts a new one.

        After committing, re-reads the latest ModelEval row as a sanity check
        that the write landed. Errors are logged and swallowed so a failed
        log write never aborts training.

        Args:
            model_name: Identifier such as 'lightgbm', 'xgboost' or 'ensemble'.
            metrics: Metric dict as produced by ``_calculate_metrics``.
            train_range: Human-readable description of the training data span.
            training_start_time: Wall-clock start of this training run.
        """
        try:
            current_time = datetime.now()
            
            with get_db_session() as db:
                # Find today's log for this model, if any:
                # date-window filter [start_date, start_date + 1 day).
                existing_log = db.query(ModelTrainingLog).filter(
                    ModelTrainingLog.model == model_name,
                    ModelTrainingLog.train_start >= training_start_time.date(),
                    ModelTrainingLog.train_start < training_start_time.date() + timedelta(days=1)
                ).first()
                
                if existing_log:
                    # Update the existing record in place.
                    existing_log.train_start = training_start_time
                    existing_log.train_end = current_time
                    existing_log.status = 'completed'
                    # NOTE(review): any model other than 'lightgbm' (including
                    # 'ensemble') records the XGBoost learning rate -- confirm
                    # this fallback is intended for the ensemble model.
                    existing_log.learning_rate = self.lightgbm_config['learning_rate'] if model_name == 'lightgbm' else self.xgboost_config['learning_rate']
                    existing_log.train_loss = metrics.get('train_mae', 0.0)
                    existing_log.val_loss = metrics.get('mae', 0.0)
                    existing_log.train_acc = metrics.get('train_r2', 0.0)
                    existing_log.val_acc = metrics.get('r2', 0.0)
                    existing_log.progress = 100.0
                    existing_log.config_json = f'{{"model": "{model_name}", "train_range": "{train_range}"}}'
                    logger.info(f"更新现有训练日志记录: {model_name}")
                else:
                    # No log for today yet -- create a fresh record.
                    training_log = ModelTrainingLog(
                        model=model_name,
                        train_start=training_start_time,
                        train_end=current_time,
                        status='completed',
                        learning_rate=self.lightgbm_config['learning_rate'] if model_name == 'lightgbm' else self.xgboost_config['learning_rate'],
                        train_loss=metrics.get('train_mae', 0.0),
                        val_loss=metrics.get('mae', 0.0),
                        train_acc=metrics.get('train_r2', 0.0),
                        val_acc=metrics.get('r2', 0.0),
                        progress=100.0,
                        config_json=f'{{"model": "{model_name}", "train_range": "{train_range}"}}'
                    )
                    db.add(training_log)
                    logger.info(f"创建新训练日志记录: {model_name}")
                
                # Save the evaluation result: try today's row first...
                current_date = current_time.date()
                existing_eval = db.query(ModelEval).filter(
                    ModelEval.model == model_name,
                    ModelEval.train_dt == current_date
                ).first()
                
                # ...then fall back to the most recent row for this model.
                if not existing_eval:
                    existing_eval = db.query(ModelEval).filter(
                        ModelEval.model == model_name
                    ).order_by(ModelEval.created_at.desc()).first()
                
                if existing_eval:
                    # Update the existing evaluation record in place.
                    existing_eval.mae = metrics.get('mae', 0.0)
                    existing_eval.rmse = metrics.get('rmse', 0.0)
                    existing_eval.r2 = metrics.get('r2', 0.0)
                    existing_eval.mape = metrics.get('mape', 0.0)
                    # NOTE(review): max_err is approximated as 2x RMSE rather
                    # than the true maximum error -- confirm this is intended.
                    existing_eval.max_err = metrics.get('rmse', 0.0) * 2
                    existing_eval.train_dt = current_date
                    existing_eval.created_at = current_time
                    logger.info(f"更新现有模型评估记录: {model_name}, MAE={existing_eval.mae:.4f}, R²={existing_eval.r2:.4f}")
                else:
                    # No prior evaluation exists -- insert a new row.
                    model_eval = ModelEval(
                        model=model_name,
                        train_dt=current_date,
                        mae=metrics.get('mae', 0.0),
                        rmse=metrics.get('rmse', 0.0),
                        r2=metrics.get('r2', 0.0),
                        mape=metrics.get('mape', 0.0),
                        max_err=metrics.get('rmse', 0.0) * 2,
                        created_at=current_time
                    )
                    db.add(model_eval)
                    logger.info(f"创建新模型评估记录: {model_name}, MAE={model_eval.mae:.4f}, R²={model_eval.r2:.4f}")
                
                db.commit()
                logger.info(f"✅ 训练日志和模型评估数据已成功保存到数据库: {model_name}")
                
                # Sanity check: re-read the latest row to verify the commit.
                verification = db.query(ModelEval).filter(
                    ModelEval.model == model_name
                ).order_by(ModelEval.created_at.desc()).first()
                
                if verification:
                    logger.info(f"✅ 数据验证成功: {model_name} - MAE={verification.mae:.4f}, 创建时间={verification.created_at}")
                else:
                    logger.warning(f"⚠️ 数据验证失败: {model_name} - 未找到保存的记录")
                
        except Exception as e:
            logger.error(f"❌ 保存训练日志失败 {model_name}: {e}")
            import traceback
            traceback.print_exc()
    
    def _save_feature_importance(self, model_name: str, feature_importance: Dict[str, Any]):
        """Persist the top-50 per-feature SHAP importances to the FeatImp table.

        Recomputes mean |SHAP| per feature from the raw SHAP matrix carried in
        ``feature_importance['raw_shap_values']``, reconstructs human-readable
        feature names from the full (pre-selection) layout, replaces today's
        FeatImp rows for this model, and inserts the 50 highest-importance
        features (skipping zero-importance ones). Errors are logged and
        swallowed.

        Args:
            model_name: Model identifier ('lightgbm', 'xgboost', ...).
            feature_importance: Analysis output; must contain the key
                'raw_shap_values' or the method returns without saving.
        """
        try:
            if not feature_importance:
                logger.warning(f"模型 {model_name} 没有特征重要性数据")
                return
            
            # Without the raw SHAP matrix there is nothing meaningful to save.
            if 'raw_shap_values' not in feature_importance:
                logger.warning(f"模型 {model_name} 没有原始SHAP值数据，跳过特征重要性保存")
                return
                
            raw_shap_values = feature_importance['raw_shap_values']
            
            # Mean absolute SHAP value per feature across all samples.
            mean_shap = np.mean(np.abs(raw_shap_values), axis=0)
            
            # Build readable names matching the original feature layout.
            feature_names = []
            
            # Historical-load features: named by day number and 15-min slot
            # (96 slots per day).
            historical_indices = self.feature_indices['historical_load']
            for i, idx in enumerate(historical_indices):
                day = (i // 96) + 1
                time_point = (i % 96) + 1
                feature_names.append(f"hist_load_day{day}_t{time_point}")
                
            # Weather features, in layout order.
            weather_features = ['temp_max', 'temp_min', 'precip', 'humidity']
            for feat in weather_features:
                feature_names.append(f"weather_{feat}")
                
            # Time features: 7 weekday one-hots, month, holiday flag.
            time_features = ['mon', 'tue', 'wed', 'thu', 'fri', 'sat', 'sun', 'month', 'holiday']
            for feat in time_features:
                feature_names.append(f"time_{feat}")
            
            # Reconcile name count with SHAP vector length (e.g. after
            # feature selection): pad with generic names or truncate.
            if len(feature_names) != len(mean_shap):
                logger.warning(f"特征名称数量({len(feature_names)})与SHAP值数量({len(mean_shap)})不匹配，调整特征名称")
                # Pad with index-based placeholder names if short...
                while len(feature_names) < len(mean_shap):
                    feature_names.append(f"feature_{len(feature_names)}")
                # ...and truncate if long.
                feature_names = feature_names[:len(mean_shap)]
                
            logger.info(f"开始保存特征重要性数据到数据库: {model_name}, 特征数量: {len(feature_names)}")
            
            with get_db_session() as db:
                # Replace semantics: drop today's rows for this model first.
                db.query(FeatImp).filter(
                    FeatImp.model == model_name,
                    FeatImp.train_dt == datetime.now().date()
                ).delete()
                
                # Rank features by descending importance.
                sorted_indices = np.argsort(mean_shap)[::-1]
                
                saved_count = 0
                for i, idx in enumerate(sorted_indices[:50]):  # keep only the top 50 features
                    if idx >= len(feature_names):
                        logger.warning(f"特征索引 {idx} 超出范围，跳过")
                        continue
                    
                    feature_name = feature_names[idx]
                    importance_score = float(mean_shap[idx])
                    
                    # Zero-importance features carry no signal -- skip them.
                    if importance_score == 0.0:
                        continue
                    
                    feat_imp = FeatImp(
                        model=model_name,
                        feat=feature_name,
                        imp=importance_score,
                        train_dt=datetime.now().date()
                    )
                    
                    db.add(feat_imp)
                    saved_count += 1
                
                db.commit()
                logger.info(f"✅ 特征重要性数据已保存: {model_name}, 保存数量: {saved_count}")
                
        except Exception as e:
            logger.error(f"❌ 保存特征重要性数据失败 {model_name}: {e}")
            import traceback
            traceback.print_exc()
    
    def _save_shap_analysis(self, model_name: str, feature_importance: Dict[str, Any]):
        """Persist the SHAP interpretability summary to the ShapAnalysis table.

        Replaces today's ShapAnalysis row for this model (delete-then-insert)
        with category contribution ratios, per-factor importances, the
        generated analysis texts and a JSON dump of the full structure.
        Errors are logged and swallowed.

        Args:
            model_name: Model identifier ('lightgbm', 'xgboost', ...).
            feature_importance: Output of the feature-importance analysis,
                including the 'interpretability_analysis' sub-dict.
        """
        try:
            if not feature_importance:
                logger.warning(f"模型 {model_name} 没有SHAP分析数据")
                return
                
            current_date = datetime.now().date()
            
            with get_db_session() as db:
                # Replace semantics: drop today's row for this model first.
                db.query(ShapAnalysis).filter(
                    ShapAnalysis.model == model_name,
                    ShapAnalysis.train_dt == current_date
                ).delete()
                
                # Category-level contribution ratios (default 0.0 when absent).
                historical_contribution = feature_importance.get('historical_load', {}).get('contribution_ratio', 0.0)
                weather_contribution = feature_importance.get('weather', {}).get('contribution_ratio', 0.0)
                time_contribution = feature_importance.get('time', {}).get('contribution_ratio', 0.0)
                
                # Per-factor weather importances.
                weather_data = feature_importance.get('weather', {})
                temp_max_imp = weather_data.get('temperature_max_importance', 0.0)
                temp_min_imp = weather_data.get('temperature_min_importance', 0.0)
                precip_imp = weather_data.get('precipitation_importance', 0.0)
                humidity_imp = weather_data.get('humidity_importance', 0.0)
                
                # Per-factor time importances.
                time_data = feature_importance.get('time', {})
                weekday_imp = time_data.get('weekday_importance', 0.0)
                month_imp = time_data.get('month_importance', 0.0)
                holiday_imp = time_data.get('holiday_importance', 0.0)
                
                # Human-readable analysis sentences.
                interpretability = feature_importance.get('interpretability_analysis', {})
                
                # Assemble the new row.
                shap_analysis = ShapAnalysis(
                    model=model_name,
                    train_dt=current_date,
                    historical_load_contribution=historical_contribution,
                    weather_contribution=weather_contribution,
                    time_contribution=time_contribution,
                    temp_max_importance=temp_max_imp,
                    temp_min_importance=temp_min_imp,
                    precipitation_importance=precip_imp,
                    humidity_importance=humidity_imp,
                    weekday_importance=weekday_imp,
                    month_importance=month_imp,
                    holiday_importance=holiday_imp,
                    historical_load_impact=interpretability.get('historical_load_impact', ''),
                    weather_impact=interpretability.get('weather_impact', ''),
                    weather_detail=interpretability.get('weather_detail', ''),
                    time_impact=interpretability.get('time_impact', ''),
                    time_detail=interpretability.get('time_detail', ''),
                    holiday_impact=interpretability.get('holiday_impact', ''),
                    feature_importance_json=self._convert_to_json_safe(feature_importance),
                    # NOTE(review): created_at receives a date here, while sibling
                    # tables store a full datetime -- confirm the column expects this.
                    created_at=current_date
                )
                
                db.add(shap_analysis)
                db.commit()
                
                logger.info(f"✅ SHAP可解释性分析已保存: {model_name}")
                logger.info(f"   历史负荷贡献: {historical_contribution:.3f}")
                logger.info(f"   气象因素贡献: {weather_contribution:.3f}")
                logger.info(f"   时间因素贡献: {time_contribution:.3f}")
                
        except Exception as e:
            logger.error(f"❌ 保存SHAP可解释性分析失败 {model_name}: {e}")
            import traceback
            traceback.print_exc()
    
    def _save_ensemble_weights(self, model_name: str, weights: Dict[str, float]):
        """Persist ensemble sub-model weights to the FeatImp table.

        The ensemble has no per-feature importances, so its sub-model weights
        are stored in FeatImp instead, one row per sub-model named
        ``weight_<sub_model>``. Today's rows for the model are replaced
        (delete-then-insert). Errors are logged and swallowed.

        Args:
            model_name: The ensemble model's identifier (e.g. 'ensemble').
            weights: Sub-model name -> normalized weight mapping.
        """
        try:
            logger.info(f"开始保存集成模型权重到数据库: {model_name}")
            
            with get_db_session() as db:
                # Replace semantics: drop today's rows for this model first.
                db.query(FeatImp).filter(
                    FeatImp.model == model_name,
                    FeatImp.train_dt == datetime.now().date()
                ).delete()
                
                # One FeatImp row per sub-model weight.
                for sub_model, weight in weights.items():
                    feat_imp = FeatImp(
                        model=model_name,
                        feat=f"weight_{sub_model}",
                        imp=weight,
                        train_dt=datetime.now().date()
                    )
                    db.add(feat_imp)
                
                db.commit()
                logger.info(f"✅ 集成模型权重已保存: {model_name}, 权重: {weights}")
                
        except Exception as e:
            logger.error(f"❌ 保存集成模型权重失败 {model_name}: {e}")
            import traceback
            traceback.print_exc()
    
    def _save_ensemble_shap_analysis(self, model_name: str, weights: Dict[str, float], results: Dict[str, Dict[str, Any]]):
        """Aggregate sub-model SHAP summaries into a weighted ensemble record and persist it.

        Each sub-model's ``results[name]['feature_importance']`` dict is folded
        into ensemble-level numbers using that sub-model's ensemble weight, an
        interpretability narrative is generated from the aggregated ratios, and
        today's ``ShapAnalysis`` row for ``model_name`` is replaced
        (delete-then-insert). Any failure is logged and swallowed so a
        persistence problem never aborts the training pipeline.

        Fix vs. previous version: the ``interpretability_texts`` list was
        accumulated but never read (dead code) and has been removed; the ten
        parallel accumulator variables were collapsed into the
        ``_accumulate_weighted_shap`` helper.

        Args:
            model_name: Storage key for the ensemble's SHAP record.
            weights: Ensemble weight per sub-model name.
            results: Per-sub-model training results; only entries carrying a
                'feature_importance' dict participate in the aggregation.
        """
        try:
            logger.info(f"开始为集成模型生成SHAP可解释性分析: {model_name}")

            # Keep only the sub-models that actually produced SHAP data.
            sub_model_shaps = {
                sub_model: {
                    'weight': weight,
                    'shap_data': results[sub_model]['feature_importance'],
                }
                for sub_model, weight in weights.items()
                if sub_model in results and 'feature_importance' in results[sub_model]
            }

            if not sub_model_shaps:
                logger.warning(f"没有找到子模型的SHAP分析数据，跳过集成模型SHAP分析")
                return

            # Weighted average of every tracked contribution / importance value.
            agg = self._accumulate_weighted_shap(sub_model_shaps)

            ensemble_interpretability = self._generate_ensemble_interpretability_analysis(
                agg['historical'],
                agg['weather'],
                agg['time'],
                weights
            )

            current_date = datetime.now().date()

            with get_db_session() as db:
                # Replace today's record: delete any existing row first.
                db.query(ShapAnalysis).filter(
                    ShapAnalysis.model == model_name,
                    ShapAnalysis.train_dt == current_date
                ).delete()

                shap_analysis = ShapAnalysis(
                    model=model_name,
                    train_dt=current_date,
                    historical_load_contribution=agg['historical'],
                    weather_contribution=agg['weather'],
                    time_contribution=agg['time'],
                    temp_max_importance=agg['temp_max'],
                    temp_min_importance=agg['temp_min'],
                    precipitation_importance=agg['precip'],
                    humidity_importance=agg['humidity'],
                    weekday_importance=agg['weekday'],
                    month_importance=agg['month'],
                    holiday_importance=agg['holiday'],
                    historical_load_impact=ensemble_interpretability.get('historical_load_impact', ''),
                    weather_impact=ensemble_interpretability.get('weather_impact', ''),
                    weather_detail=ensemble_interpretability.get('weather_detail', ''),
                    time_impact=ensemble_interpretability.get('time_impact', ''),
                    time_detail=ensemble_interpretability.get('time_detail', ''),
                    holiday_impact=ensemble_interpretability.get('holiday_impact', ''),
                    feature_importance_json=self._convert_to_json_safe({
                        'ensemble_weights': weights,
                        'weighted_contributions': {
                            'historical_load': agg['historical'],
                            'weather': agg['weather'],
                            'time': agg['time']
                        },
                        'sub_models_analysis': sub_model_shaps
                    }),
                    # NOTE(review): created_at receives a date (not a datetime)
                    # here, unlike ModelEval records elsewhere in this file —
                    # confirm the column type before normalizing.
                    created_at=current_date
                )

                db.add(shap_analysis)
                db.commit()

                logger.info(f"✅ 集成模型SHAP可解释性分析已保存: {model_name}")
                logger.info(f"   加权历史负荷贡献: {agg['historical']:.3f}")
                logger.info(f"   加权气象因素贡献: {agg['weather']:.3f}")
                logger.info(f"   加权时间因素贡献: {agg['time']:.3f}")
                logger.info(f"   基于子模型权重: {weights}")

        except Exception as e:
            logger.error(f"❌ 保存集成模型SHAP可解释性分析失败 {model_name}: {e}")
            import traceback
            traceback.print_exc()

    @staticmethod
    def _accumulate_weighted_shap(sub_model_shaps: Dict[str, Dict[str, Any]]) -> Dict[str, float]:
        """Weight-average each sub-model's SHAP summary into one flat dict.

        A missing section ('historical_load' / 'weather' / 'time') simply
        contributes nothing, matching the previous per-key existence checks.
        """
        agg = dict.fromkeys(
            ('historical', 'weather', 'time',
             'temp_max', 'temp_min', 'precip', 'humidity',
             'weekday', 'month', 'holiday'),
            0.0,
        )
        for data in sub_model_shaps.values():
            w = data['weight']
            shap_data = data['shap_data']

            hist = shap_data.get('historical_load', {})
            agg['historical'] += hist.get('contribution_ratio', 0) * w

            weather = shap_data.get('weather', {})
            agg['weather'] += weather.get('contribution_ratio', 0) * w
            agg['temp_max'] += weather.get('temperature_max_importance', 0) * w
            agg['temp_min'] += weather.get('temperature_min_importance', 0) * w
            agg['precip'] += weather.get('precipitation_importance', 0) * w
            agg['humidity'] += weather.get('humidity_importance', 0) * w

            time_data = shap_data.get('time', {})
            agg['time'] += time_data.get('contribution_ratio', 0) * w
            agg['weekday'] += time_data.get('weekday_importance', 0) * w
            agg['month'] += time_data.get('month_importance', 0) * w
            agg['holiday'] += time_data.get('holiday_importance', 0) * w
        return agg
    
    def _generate_ensemble_interpretability_analysis(self, hist_contrib: float, weather_contrib: float, 
                                                   time_contrib: float, weights: Dict[str, float]) -> Dict[str, str]:
        """生成集成模型的可解释性分析文本"""
        try:
            analysis = {}
            
            # 历史负荷影响分析
            if hist_contrib > 0.8:
                analysis['historical_load_impact'] = f"集成模型主要依赖历史负荷模式({hist_contrib:.1%})，体现了负荷的强时序性特征"
            elif hist_contrib > 0.6:
                analysis['historical_load_impact'] = f"集成模型较多依赖历史负荷({hist_contrib:.1%})，同时考虑其他因素"
            else:
                analysis['historical_load_impact'] = f"集成模型对历史负荷的依赖度相对较低({hist_contrib:.1%})"
            
            # 气象因素影响分析
            if weather_contrib > 0.2:
                analysis['weather_impact'] = f"气象因素在集成模型中起到重要作用({weather_contrib:.1%})，说明天气对负荷有显著影响"
                analysis['weather_detail'] = "集成模型综合考虑了温度、湿度、降水等多种气象因素"
            elif weather_contrib > 0.05:
                analysis['weather_impact'] = f"气象因素在集成模型中起到一定作用({weather_contrib:.1%})"
                analysis['weather_detail'] = "集成模型适度考虑了气象因素的影响"
            else:
                analysis['weather_impact'] = f"气象因素在集成模型中影响较小({weather_contrib:.1%})"
                analysis['weather_detail'] = "集成模型对气象因素的依赖度较低"
            
            # 时间因素影响分析
            if time_contrib > 0.1:
                analysis['time_impact'] = f"时间因素在集成模型中有明显作用({time_contrib:.1%})"
                analysis['time_detail'] = "集成模型考虑了工作日、季节性等时间规律"
            else:
                analysis['time_impact'] = f"时间因素在集成模型中影响较小({time_contrib:.1%})"
                analysis['time_detail'] = "集成模型对时间模式的依赖度较低"
            
            # 模型权重分析
            dominant_model = max(weights.items(), key=lambda x: x[1])
            analysis['model_weights_analysis'] = f"集成模型主要依赖{dominant_model[0].upper()}({dominant_model[1]:.1%})，体现了该模型的优势"
            
            return analysis
            
        except Exception as e:
            logger.error(f"生成集成模型可解释性分析失败: {e}")
            return {}
    
    def get_model_interpretability(self, model_name: str) -> Dict[str, Any]:
        """获取模型可解释性分析"""
        try:
            model_path = self.model_dir / f'{model_name}_model.pkl'
            if not model_path.exists():
                return {}
            
            with open(model_path, 'rb') as f:
                model_data = pickle.load(f)
            
            return model_data.get('metrics', {}).get('feature_importance', {})
            
        except Exception as e:
            logger.error(f"获取模型可解释性失败: {e}")
            return {}
    
    def get_shap_analysis_from_db(self, model_name: str) -> Dict[str, Any]:
        """Fetch the most recent persisted SHAP analysis for *model_name*.

        Queries the newest ``ShapAnalysis`` row (by ``created_at``) and
        reshapes it into a nested dict (contributions / weather_details /
        time_details / interpretability_analysis / raw JSON). Returns {} when
        no record exists or the query fails.
        """
        try:
            with get_db_session() as db:
                # Newest record wins; ordering is by creation timestamp.
                record = (
                    db.query(ShapAnalysis)
                    .filter(ShapAnalysis.model == model_name)
                    .order_by(ShapAnalysis.created_at.desc())
                    .first()
                )

                if record is None:
                    logger.warning(f"未找到模型 {model_name} 的SHAP分析记录")
                    return {}

                train_date = record.train_dt.isoformat() if record.train_dt else None
                return {
                    'model': record.model,
                    'train_date': train_date,
                    'contributions': {
                        'historical_load': record.historical_load_contribution,
                        'weather': record.weather_contribution,
                        'time': record.time_contribution
                    },
                    'weather_details': {
                        'temp_max_importance': record.temp_max_importance,
                        'temp_min_importance': record.temp_min_importance,
                        'precipitation_importance': record.precipitation_importance,
                        'humidity_importance': record.humidity_importance
                    },
                    'time_details': {
                        'weekday_importance': record.weekday_importance,
                        'month_importance': record.month_importance,
                        'holiday_importance': record.holiday_importance
                    },
                    'interpretability_analysis': {
                        'historical_load_impact': record.historical_load_impact,
                        'weather_impact': record.weather_impact,
                        'weather_detail': record.weather_detail,
                        'time_impact': record.time_impact,
                        'time_detail': record.time_detail,
                        'holiday_impact': record.holiday_impact
                    },
                    'feature_importance_json': record.feature_importance_json
                }

        except Exception as e:
            logger.error(f"获取SHAP分析记录失败 {model_name}: {e}")
            return {}
    
    def _convert_to_json_safe(self, obj):
        """将包含numpy数组的对象转换为JSON安全格式"""
        if isinstance(obj, np.ndarray):
            return obj.tolist()
        elif isinstance(obj, dict):
            return {k: self._convert_to_json_safe(v) for k, v in obj.items()}
        elif isinstance(obj, (list, tuple)):
            return [self._convert_to_json_safe(item) for item in obj]
        elif isinstance(obj, (np.integer, np.floating)):
            return float(obj)
        else:
            return obj
    
    def save_model_eval_to_db(self, model_name: str, metrics: Dict[str, Any],
                             train_samples: int, test_samples: int, features_count: int,
                             training_time: float, data_range: str):
        """Upsert today's evaluation metrics for *model_name* into ModelEval.

        Looks for today's row first, falls back to the model's newest row, and
        updates it in place; otherwise inserts a new row. A follow-up query
        re-reads the latest row purely to log that the write landed. Failures
        are logged and swallowed. The ``train_samples`` / ``test_samples`` /
        ``features_count`` / ``training_time`` / ``data_range`` parameters are
        accepted for interface compatibility but are not persisted here.
        """
        logger.info(f"开始保存模型评估数据到数据库: {model_name}")
        try:
            with get_db_session() as db:
                today = datetime.now().date()

                # Prefer today's record; otherwise reuse the newest one so
                # history is updated rather than duplicated.
                record = db.query(ModelEval).filter(
                    ModelEval.model == model_name,
                    ModelEval.train_dt == today
                ).first()
                if record is None:
                    record = db.query(ModelEval).filter(
                        ModelEval.model == model_name
                    ).order_by(ModelEval.created_at.desc()).first()

                mae = metrics.get('mae', 0.0)
                rmse = metrics.get('rmse', 0.0)
                r2 = metrics.get('r2', 0.0)
                mape = metrics.get('mape', 0.0)

                if record is not None:
                    record.mae = mae
                    record.rmse = rmse
                    record.r2 = r2
                    record.mape = mape
                    record.max_err = rmse * 2  # rough upper-bound estimate
                    record.train_dt = today
                    record.created_at = datetime.now()
                    logger.info(f"更新现有模型评估记录: {model_name}, MAE={record.mae:.4f}, R²={record.r2:.4f}")
                else:
                    record = ModelEval(
                        model=model_name,
                        train_dt=today,
                        mae=mae,
                        rmse=rmse,
                        r2=r2,
                        mape=mape,
                        max_err=rmse * 2,  # rough upper-bound estimate
                        created_at=datetime.now()
                    )
                    db.add(record)
                    logger.info(f"创建新模型评估记录: {model_name}, MAE={record.mae:.4f}, R²={record.r2:.4f}")

                db.commit()
                logger.info(f"✅ 模型评估数据已成功保存到数据库: {model_name}")

                # Sanity re-read so the log shows what actually got stored.
                latest = db.query(ModelEval).filter(
                    ModelEval.model == model_name
                ).order_by(ModelEval.created_at.desc()).first()

                if latest:
                    logger.info(f"✅ 数据验证成功: {model_name} - MAE={latest.mae:.4f}, 创建时间={latest.created_at}")
                else:
                    logger.warning(f"⚠️ 数据验证失败: {model_name} - 未找到保存的记录")

        except Exception as e:
            logger.error(f"❌ 保存模型评估数据失败 {model_name}: {e}")
            import traceback
            traceback.print_exc()

if __name__ == "__main__":
    # Smoke test: run parallel training and report each model's metrics and
    # interpretability analysis (when available).
    enhanced_trainer = EnhancedDualTrainer()
    training_results = enhanced_trainer.train_all_models_parallel("1_month")

    print("并行训练结果:")
    for name, result in training_results.items():
        print(f"{name}: MAE={result['mae']:.4f}, RMSE={result['rmse']:.4f}, R²={result['r2']:.4f}")

        if 'feature_importance' in result:
            print(f"{name}特征重要性分析:")
            importance_data = result['feature_importance']
            if 'interpretability_analysis' in importance_data:
                for item, text in importance_data['interpretability_analysis'].items():
                    print(f"  {item}: {text}")