import pandas as pd
import numpy as np
from datetime import datetime, date, timedelta
from typing import Dict, List, Optional, Tuple, Union, Any
import joblib
import pickle
import os
import logging
import torch
from sqlalchemy.orm import Session
from sqlalchemy import and_, text, func

# Machine learning models
import lightgbm as lgb
import xgboost as xgb
# MLP模型已移除，只保留LightGBM和XGBoost
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score

from backend.config.database import get_db_session
from backend.entities.load_data_new import LoadData
from backend.entities.weather_daily import WeatherDaily
from backend.entities.prediction import PredResult, ModelEval
from backend.entities.model_training_log import ModelTrainingLog
from backend.entities.feat_imp import FeatImp
from backend.entities.holiday import HolidayInfo
from backend.service.weather_forecast_service import weather_forecast_service
from backend.utils.holiday_rule_engine import is_holiday, get_holiday_name
from backend.service.feature_importance_service import FeatureImportanceService

logger = logging.getLogger(__name__)


class PredictionService:
    """预测服务 - 专注于模型加载和多输出预测
    
    主要职责：
    1. 加载已训练的模型
    2. 准备预测特征（与多模型训练器保持一致）
    3. 执行单模型和集成预测
    4. 保存预测结果到数据库
    
    注意：模型训练由 MultiModelTrainer 负责，此服务专注于预测功能
    """
    
    def __init__(self):
        """Initialize the prediction service: model containers, paths, and ensemble weights."""
        # Loaded model objects keyed by model name ('lightgbm' / 'xgboost').
        self.models = {}
        # Per-model feature preprocessing components
        # (plain scaler for lightgbm; dict with selector+scaler for xgboost).
        self.scalers = {}
        # Target-variable scalers (kept for backward compatibility with old artifacts).
        self.y_scalers = {}

        # Resolve the saved-model directory relative to this file so it is
        # found regardless of the current working directory.
        # (os is already imported at module level; no local re-import needed.)
        from pathlib import Path

        backend_dir = Path(__file__).parent.parent  # .../backend
        project_root = backend_dir.parent
        self.model_dir = str(project_root / "backend" / "models" / "saved_models")
        os.makedirs(self.model_dir, exist_ok=True)

        # Fixed weights used when blending models in ensemble prediction.
        self.ensemble_weights = {
            'lightgbm': 0.6,
            'xgboost': 0.4
        }

        # Tracks which models have been successfully loaded from disk.
        self.is_trained = {'lightgbm': False, 'xgboost': False}

        # Optional ensemble configuration loaded from ensemble_config.json.
        self.ensemble_config = None

        # Service used to produce feature-importance analysis for responses.
        self.feature_importance_service = FeatureImportanceService()
        
    # MLP特征工程方法已移除，只保留LightGBM和XGBoost
        
    def load_models(self) -> bool:
        """Load previously trained models from disk (supports the new pkl format).

        Populates ``self.models`` / ``self.scalers`` / ``self.y_scalers`` and
        ``self.ensemble_config``, and sets ``self.is_trained`` per model.

        Returns:
            True if at least one model was loaded successfully.
        """
        try:
            model_files = {
                'lightgbm': 'lightgbm_model.pkl',
                'xgboost': 'xgboost_model.pkl'
            }
            
            for model_name, filename in model_files.items():
                model_path = os.path.join(self.model_dir, filename)
                
                if os.path.exists(model_path):
                    try:
                        # NOTE(review): pickle.load assumes the model files are
                        # trusted artifacts produced by our own trainer.
                        with open(model_path, 'rb') as f:
                            model_data = pickle.load(f)
                        
                        # New format: a dict bundling the model with metadata.
                        if isinstance(model_data, dict):
                            logger.info(f"检测到{model_name}模型为字典格式，键: {list(model_data.keys())}")
                            # The dict must carry a 'model' key to be usable.
                            if 'model' in model_data:
                                self.models[model_name] = model_data['model']
                                
                                # XGBoost keeps its full preprocessing pipeline.
                                if model_name == 'xgboost':
                                    # Store selector and scaler together.
                                    selector = model_data.get('selector')
                                    scaler = model_data.get('scaler')
                                    self.scalers[model_name] = {
                                        'selector': selector,
                                        'scaler': scaler,
                                        'selected_features': model_data.get('selected_features', 685)
                                    }
                                    logger.info(f"加载{model_name}模型成功（新格式），包含特征选择器和标准化器")
                                else:
                                    # LightGBM only needs the scaler.
                                    if 'scaler' in model_data and model_data['scaler'] is not None:
                                        self.scalers[model_name] = model_data['scaler']
                                    logger.info(f"加载{model_name}模型成功（新格式）")
                            else:
                                logger.error(f"{model_name}模型字典格式不正确，缺少model键: {list(model_data.keys())}")
                                self.is_trained[model_name] = False
                                continue
                        else:
                            # Legacy format: the pickle is the bare model object.
                            self.models[model_name] = model_data
                            logger.info(f"加载{model_name}模型成功（兼容旧格式）")
                        
                        self.is_trained[model_name] = True
                        
                    except Exception as e:
                        logger.error(f"加载{model_name}模型失败: {e}")
                        self.is_trained[model_name] = False
                        continue
                else:
                    logger.warning(f"模型文件不存在: {model_name} -> {model_path}")
                    self.is_trained[model_name] = False
            
            # For compatibility, load a standalone target-variable scaler file
            # if one exists and no global scaler is registered yet.
            y_scaler_path = os.path.join(self.model_dir, 'y_scaler.pkl')
            if os.path.exists(y_scaler_path) and 'global' not in self.y_scalers:
                try:
                    with open(y_scaler_path, 'rb') as f:
                        self.y_scalers['global'] = pickle.load(f)
                    logger.info("加载单独的目标变量标准化器成功（兼容性）")
                except Exception as e:
                    logger.error(f"加载单独的目标变量标准化器失败: {e}")
            
            # Load the optional ensemble configuration.
            ensemble_config_path = os.path.join(self.model_dir, 'ensemble_config.json')
            if os.path.exists(ensemble_config_path):
                try:
                    import json
                    with open(ensemble_config_path, 'r') as f:
                        self.ensemble_config = json.load(f)
                    logger.info("加载集成模型配置成功")
                except Exception as e:
                    logger.error(f"加载集成模型配置失败: {e}")
            
            loaded_models = [name for name, trained in self.is_trained.items() if trained]
            logger.info(f"成功加载的模型: {loaded_models}")
            
            return len(loaded_models) > 0
            
        except Exception as e:
            logger.error(f"加载模型失败: {e}")
            return False
    
    def prepare_prediction_features(self, target_date: str) -> Optional[np.ndarray]:
        """Build the model input feature matrix for one target date.

        Layout matches the multi-model trainer: 672 historical load values
        (7 days x 96 quarter-hours) + 4 weather values + 9 time values.

        Args:
            target_date: Date string in 'YYYY-MM-DD' format.

        Returns:
            Array of shape (1, n_features) after feature engineering, or
            None on any error.
        """
        try:
            target_dt = datetime.strptime(target_date, '%Y-%m-%d').date()
            
            with get_db_session() as db:
                # Historical load features for the preceding 7 days.
                historical_features = self._get_historical_load_features(target_dt, db)
                
                # Daily weather record for the target date.
                weather_data = db.query(WeatherDaily).filter(WeatherDaily.dt == target_dt).first()
                
                # If absent, ask the forecast service (presumably it writes the
                # forecast into the DB — TODO confirm) and re-query.
                if not weather_data:
                    logger.info(f"数据库中没有 {target_date} 的气象数据，尝试从气象预报服务获取")
                    weather_forecast = weather_forecast_service.get_weather_forecast_for_prediction(target_dt)
                    if weather_forecast:
                        weather_data = db.query(WeatherDaily).filter(WeatherDaily.dt == target_dt).first()
                        logger.info(f"成功从气象预报服务获取 {target_date} 的气象数据")
                
                # Holiday record for the target date.
                # NOTE(review): holiday_data is queried but not used below;
                # the rule-engine call decides the holiday flag instead.
                holiday_data = db.query(HolidayInfo).filter(HolidayInfo.dt == target_dt).first()
                
                # Assemble the flat feature vector.
                feature_vector = []
                
                # Historical load: 7 days x 96 points = 672 dims.
                feature_vector.extend(historical_features)
                
                # Weather: 4 dims (missing values default to 0.0).
                if weather_data:
                    weather_features = [
                        weather_data.t_max or 0.0,
                        weather_data.t_min or 0.0, 
                        weather_data.precip or 0.0,
                        weather_data.humidity or 0.0
                    ]
                else:
                    weather_features = [0.0, 0.0, 0.0, 0.0]
                
                feature_vector.extend(weather_features)
                
                # Time features: weekday one-hot, month, holiday flag (9 dims).
                weekday = target_dt.weekday()  # 0-6, Monday=0
                month = target_dt.month
                
                # The Belgian holiday rule engine decides the holiday flag.
                try:
                    is_holiday_flag = is_holiday(target_dt, db)
                    logger.debug(f"日期 {target_dt}: 节假日={is_holiday_flag}")
                except Exception as e:
                    logger.warning(f"节假日检查失败 {target_dt}: {e}")
                    is_holiday_flag = False
                
                time_features = [
                    1.0 if weekday == i else 0.0 for i in range(7)  # 7 dims: weekday one-hot
                ] + [
                    month / 12.0,  # month normalised into (0, 1]
                    1.0 if is_holiday_flag else 0.0  # holiday flag
                ]
                
                feature_vector.extend(time_features)
                
                # Apply the same feature engineering as the multi-model trainer.
                features = self._apply_feature_engineering(np.array([feature_vector]))
                return features
                
        except Exception as e:
            logger.error(f"准备预测特征失败: {e}")
            return None
    
    def _apply_feature_engineering(self, features: np.ndarray) -> np.ndarray:
        """Coerce the feature matrix to the 685 columns the trained models expect.

        The training layout is 672 historical-load values + 4 weather values
        + 9 time values = 685 features.  Wider inputs are truncated to the
        first 685 columns; inputs with the full 672 historical columns but
        fewer than 685 total are zero-padded on the right.  Anything narrower
        (or any unexpected error) is returned unchanged as a fallback.

        Args:
            features: 2-D array of shape (n_samples, n_features).

        Returns:
            Array of shape (n_samples, 685) when an adjustment applies,
            otherwise the input array unchanged.
        """
        try:
            n_features = features.shape[1]
            if n_features >= 685:
                # Extra columns carry no meaning for the trained models: drop them.
                logger.info(f"特征维度匹配: {n_features} -> 685")
                return features[:, :685]
            if n_features >= 672:
                # Historical block is complete; pad the missing weather/time
                # columns with zeros.  (The previous nested >=685 re-check here
                # was unreachable and has been removed.)
                padded_features = np.zeros((features.shape[0], 685))
                padded_features[:, :n_features] = features
                logger.info(f"特征维度填充: {n_features} -> 685")
                return padded_features
            
        except Exception as e:
            logger.warning(f"特征工程失败，使用原始特征: {e}")
        
        return features
    
    def _apply_lightgbm_feature_engineering(self, features: np.ndarray) -> np.ndarray:
        """Apply the LightGBM preprocessing used at training time (standardisation only).

        Falls back to the raw feature matrix whenever the scaler is missing
        or transformation fails, so prediction can still proceed.
        """
        try:
            if 'lightgbm' not in self.scalers:
                logger.warning("LightGBM特征处理组件未加载，使用原始特征")
                return features

            scaler = self.scalers['lightgbm']
            logger.info(f"开始LightGBM特征工程: 原始特征{features.shape[1]}维")

            result = features
            if scaler is None:
                logger.info("没有LightGBM标准化器，跳过特征标准化")
            else:
                try:
                    result = scaler.transform(features)
                    logger.info(f"LightGBM特征标准化完成: {features.shape[1]}维")
                except Exception as e:
                    logger.error(f"LightGBM特征标准化失败: {e}")
                    result = features

            logger.info(f"LightGBM特征工程完成: 最终{result.shape[1]}维")
            return result

        except Exception as e:
            logger.error(f"LightGBM特征工程失败: {e}")
            logger.warning("使用原始特征作为备用方案")
            return features

    def _apply_xgboost_feature_engineering(self, features: np.ndarray) -> np.ndarray:
        """Apply the XGBoost preprocessing pipeline used at training time.

        Steps mirror enhanced_dual_trainer.py: optional feature selection,
        optional standardisation, then zero-padding/truncation so the column
        count matches the trained model.  Any failure falls back to the raw
        feature matrix.
        """
        try:
            if 'xgboost' not in self.scalers:
                logger.warning("XGBoost特征处理组件未加载，使用原始特征")
                return features

            components = self.scalers['xgboost']
            selector = components.get('selector')
            scaler = components.get('scaler')
            expected = components.get('selected_features', 685)

            logger.info(f"开始XGBoost特征工程: 原始特征{features.shape[1]}维")

            # Step 1: feature selection, same as during training.
            selected = features
            if selector is None:
                logger.info("没有特征选择器，跳过特征选择")
            else:
                try:
                    selected = selector.transform(features)
                    logger.info(f"特征选择完成: {features.shape[1]} -> {selected.shape[1]}维")
                except Exception as e:
                    logger.error(f"特征选择失败: {e}")
                    selected = features

            # Step 2: standardisation, same as during training.
            scaled = selected
            if scaler is None:
                logger.info("没有标准化器，跳过特征标准化")
            else:
                try:
                    scaled = scaler.transform(selected)
                    logger.info(f"特征标准化完成: {selected.shape[1]}维")
                except Exception as e:
                    logger.error(f"特征标准化失败: {e}")
                    scaled = selected

            # Step 3: force the column count the trained model expects.
            if scaled.shape[1] != expected:
                logger.warning(f"特征维度不匹配: 期望{expected}, 实际{scaled.shape[1]}")
                if scaled.shape[1] < expected:
                    # Pad missing columns with zeros.
                    padded = np.zeros((scaled.shape[0], expected))
                    padded[:, :scaled.shape[1]] = scaled
                    scaled = padded
                    logger.info(f"特征填充到{expected}维")
                else:
                    # Drop surplus columns.
                    scaled = scaled[:, :expected]
                    logger.info(f"特征截取到{expected}维")

            logger.info(f"XGBoost特征工程完成: 最终{scaled.shape[1]}维")
            return scaled

        except Exception as e:
            logger.error(f"XGBoost特征工程失败: {e}")
            logger.warning("使用原始特征作为备用方案")
            return features
    
    
    def _validate_predictions(self, predictions: np.ndarray, model_name: Optional[str] = None) -> np.ndarray:
        """Log the prediction range and expand single-output tree predictions.

        No de-normalisation or clipping is performed: values outside the
        plausible load range are only logged.  On unexpected errors a typical
        Belgian daily load curve is returned as a last resort.
        """
        try:
            lo = predictions.min()
            hi = predictions.max()
            model_display = model_name or "未知模型"
            logger.info(f"模型 {model_display} 预测值范围: {lo:.2f} - {hi:.2f} MW")

            # A tree model trained as single-output yields one value per
            # sample; replicate it across all 96 quarter-hour slots.
            if model_name in ['lightgbm', 'xgboost'] and predictions.shape[1] == 1:
                logger.info(f"{model_name}检测到单输出预测，扩展为96个时间点")
                predictions = np.tile(predictions, (1, 96))
                lo = predictions.min()
                hi = predictions.max()
                logger.info(f"扩展后预测值范围: {lo:.2f} - {hi:.2f} MW")

            # Range check is informational only — predictions are returned as-is.
            if 6000 <= lo <= 16000 and 6000 <= hi <= 16000:
                logger.info("✅ 预测值在合理的电力负荷范围内，无需处理")
            else:
                logger.warning(f"⚠️ 预测值超出合理范围 [{lo:.2f}, {hi:.2f}] MW（已取消合理性修正，直接返回原始结果）")
            return predictions

        except Exception as e:
            logger.error(f"预测值验证失败: {e}")
            # Last resort: synthesize a typical daily load curve.
            logger.info("使用兜底方案：典型负荷曲线")
            n_samples = predictions.shape[0]
            n_timesteps = predictions.shape[1]

            typical_curve = []
            for t in range(n_timesteps):
                hour = t * 24 / n_timesteps
                if 6 <= hour <= 9 or 17 <= hour <= 21:  # morning / evening peaks
                    load = 12000 + 1000 * np.sin((hour - 6) * np.pi / 6)
                elif 10 <= hour <= 16:  # stable daytime plateau
                    load = 11500
                else:  # overnight trough
                    load = 9500
                typical_curve.append(load)

            return np.tile(typical_curve, (n_samples, 1))
    
    def _get_historical_load_features(self, target_dt: date, db: Session) -> List[float]:
        """Collect the 96 load values for each of the 7 days before target_dt.

        Days with missing or incomplete data are filled with the average
        Belgian load (10500.0 MW).  Always returns exactly 672 values.

        Args:
            target_dt: Date to predict; history is taken strictly before it.
            db: Active database session.
        """
        try:
            historical_loads = []
            
            # Walk backwards one day at a time over the previous 7 days.
            for i in range(7):
                hist_date = target_dt - timedelta(days=i+1)
                load_records = db.query(LoadData).filter(
                    LoadData.dt == hist_date
                ).order_by(LoadData.t_idx).all()
                
                if load_records and len(load_records) == 96:
                    daily_loads = [record.load_val for record in load_records]
                    historical_loads.extend(daily_loads)
                else:
                    # Incomplete day: substitute the average Belgian load.
                    historical_loads.extend([10500.0] * 96)
            
            # Guarantee exactly 7 days x 96 points = 672 dims.
            while len(historical_loads) < 672:
                historical_loads.extend([10500.0] * 96)
            historical_loads = historical_loads[:672]
            
            return historical_loads
            
        except Exception as e:
            logger.error(f"获取历史负荷特征失败: {e}")
            return [10500.0] * 672
    
    def predict_single_day(self, target_date: str, model_name: str = 'ensemble') -> Optional[Dict]:
        """Predict the 96 quarter-hour load values for one day.

        Args:
            target_date: Date string in 'YYYY-MM-DD' format.
            model_name: 'lightgbm', 'xgboost' or 'ensemble' (weighted blend).

        Returns:
            Response dict with 'success', 'predictions' (96 floats), weather
            info, model metrics and feature-importance analysis; on failure a
            dict with 'success': False and an 'error' message.
        """
        try:
            # Lazily load models on first use.
            if not any(self.is_trained.values()):
                self.load_models()
            
            # Determine which models are actually available.
            available_models = [name for name, trained in self.is_trained.items() if trained]
            logger.info(f"可用模型: {available_models}")
            
            if not available_models:
                return {'success': False, 'error': '没有可用的模型'}
            
            # If the requested model is unavailable, fall back to a usable one.
            original_model_name = model_name
            if model_name != 'ensemble' and not self.is_trained.get(model_name, False):
                logger.warning(f"请求的模型 {model_name} 不可用")
                
                if available_models:
                    if len(available_models) == 1:
                        model_name = available_models[0]
                        logger.info(f"回退到唯一可用模型: {model_name}")
                    else:
                        model_name = 'ensemble'
                        logger.info(f"回退到ensemble模式，可用模型: {available_models}")
                else:
                    return {'success': False, 'error': f'请求的模型 {original_model_name} 不可用，且没有其他可用模型'}
            
            # Build the feature matrix for the target date.
            features = self.prepare_prediction_features(target_date)
            if features is None:
                return {'success': False, 'error': '特征准备失败'}
            
            logger.info(f"特征形状: {features.shape}")
            
            # Run the prediction.
            if model_name == 'ensemble':
                predictions = self._predict_ensemble(features)
                already_denormalized = False
            else:
                predictions_result = self._predict_single_model(features, model_name)
                # Some models may wrap their output in a dict (legacy behaviour).
                if isinstance(predictions_result, dict) and 'predictions' in predictions_result:
                    predictions = predictions_result['predictions']
                    already_denormalized = predictions_result.get('already_denormalized', False)
                else:
                    predictions = predictions_result
                    already_denormalized = False
            
            if predictions is None or len(predictions) != 96:
                logger.warning(f"预测失败，使用默认值")
                predictions = [8000.0] * 96
                already_denormalized = False
            
            # Validate (and, for single-output trees, expand) the predictions
            # unless the model reported them as already de-normalised.
            if not already_denormalized:
                predictions_array = np.array(predictions).reshape(1, -1)
                predictions_denormalized = self._validate_predictions(predictions_array, model_name or 'ensemble')
                predictions = predictions_denormalized.flatten().tolist()
            else:
                logger.info(f"模型 {model_name} 预测已反标准化，跳过后续处理")
            
            # NOTE(review): _validate_predictions no longer de-normalises;
            # this log message is historical.
            logger.info(f"预测结果反标准化完成，范围: {min(predictions):.2f} - {max(predictions):.2f}")
                
            # Persist the prediction results.
            # NOTE(review): other methods use get_db_session() as a context
            # manager; this path assumes it also returns a closable Session — verify.
            db = get_db_session()
            try:
                # Ensure predictions is a List[float] before saving.
                if isinstance(predictions, list):
                    predictions_list = [float(p) for p in predictions]
                else:
                    logger.warning(f"预测结果类型异常: {type(predictions)}")
                    predictions_list = [8000.0] * 96
                
                self._save_prediction_results(target_date, model_name, predictions_list, db)
            finally:
                db.close()
                
            # Compute summary statistics; predictions must be list[float] here.
            # NOTE(review): avg/max/min are computed but not included in the response.
            if isinstance(predictions, dict):
                logger.error("预测结果为字典类型，无法计算统计信息")
                avg_load = max_load = min_load = None
            else:
                try:
                    predictions_list = list(map(float, predictions))
                    avg_load = float(np.mean(predictions_list))
                    max_load = float(np.max(predictions_list))
                    min_load = float(np.min(predictions_list))
                except Exception as e:
                    logger.error(f"计算统计信息时出错: {e}")
                    avg_load = max_load = min_load = None

            # Weather info for the response payload.
            weather_info = self._get_weather_info(target_date)
            # Latest evaluation metrics for the chosen model.
            model_metrics = self._get_model_metrics(model_name)
            
            # Feature-importance analysis for the response payload.
            feature_importance_analysis = self.feature_importance_service.analyze_feature_importance(model_name, target_date)
            
            # Assemble the response.
            response = {
                'success': True,
                'message': f'{model_name}模型预测完成',
                'predictions': predictions,
                'weather_info': weather_info,
                'model_metrics': model_metrics,
                'feature_importance_analysis': feature_importance_analysis,
                'weather_analysis': feature_importance_analysis.get('weather_analysis', {
                    'temperatureImpact': '中等',
                    'humidityImpact': '较低',
                    'precipitationImpact': '较低',
                    'windSpeedImpact': '较低'
                })
            }
            
            # If a fallback model was used, record it in the response.
            if original_model_name != model_name:
                response['fallback_info'] = {
                    'requested_model': original_model_name,
                    'actual_model': model_name,
                    'reason': f'请求的模型 {original_model_name} 不可用，自动回退到 {model_name}'
                }
            
            return response
                
        except Exception as e:
            logger.error(f"单日预测失败: {e}")
            return {'success': False, 'error': str(e)}
    
    def _predict_single_model(self, features: np.ndarray, model_name: str) -> Optional[Union[List[float], Dict[str, Any]]]:
        """Run one loaded model on the prepared features.

        Applies the model-specific feature engineering first, then normalises
        the raw model output to a flat list of 96 values (single-output
        models are replicated across all 96 time slots).

        Returns:
            List of 96 floats, or None when the model is missing or its
            output shape is unexpected.
        """
        try:
            if model_name not in self.models:
                logger.warning(f"模型 {model_name} 未加载")
                return None
            
            model = self.models[model_name]
            
            if model_name in ['lightgbm', 'xgboost']:
                # Tree models need the same feature engineering as training.
                if model_name == 'xgboost':
                    features_processed = self._apply_xgboost_feature_engineering(features)
                    logger.info(f"XGBoost特征工程: {features.shape[1]} -> {features_processed.shape[1]}维")
                else:
                    features_processed = self._apply_lightgbm_feature_engineering(features)
                    logger.info(f"LightGBM特征工程: {features.shape[1]} -> {features_processed.shape[1]}维")
                
                pred = model.predict(features_processed)
                
                # Normalise the various possible output shapes to 96 values.
                if pred.ndim == 1:
                    # Prediction for a single sample.
                    if len(pred) == 1:
                        # Single-output model: replicate across 96 time slots.
                        logger.info(f"{model_name}检测到单输出模型，扩展为96个时间点")
                        return [pred[0]] * 96
                    elif len(pred) == 96:
                        # Multi-output model: use directly.
                        return pred.tolist()
                    else:
                        logger.warning(f"{model_name}预测输出维度异常: {len(pred)}")
                        return None
                elif pred.ndim == 2:
                    # Prediction matrix (samples x outputs).
                    if pred.shape[1] == 1:
                        # Single-output model: replicate across 96 time slots.
                        logger.info(f"{model_name}检测到单输出模型，扩展为96个时间点")
                        return [pred[0, 0]] * 96
                    elif pred.shape[1] == 96:
                        # Multi-output model: flatten the row(s).
                        return pred.flatten().tolist()
                    else:
                        logger.warning(f"{model_name}预测输出维度异常: {pred.shape}")
                        return None
                else:
                    logger.warning(f"{model_name}预测输出维度异常: {pred.shape}")
                    return None
            
            elif hasattr(model, 'predict'):
                # Any other model type exposing a predict() method.
                pred = model.predict(features)
                if pred.ndim == 2:
                    return pred.flatten().tolist()
                else:
                    return pred.tolist()
            else:
                logger.error(f"模型 {model_name} 不支持predict方法")
                return None
            
        except Exception as e:
            logger.error(f"{model_name}预测失败: {e}")
            return None
    
    def _predict_ensemble(self, features: np.ndarray) -> Optional[List[float]]:
        """Blend LightGBM and XGBoost predictions into one 96-point curve.

        Each available model contributes its configured weight; if all
        weights are zero a simple average is used.  Returns None when no
        model yields a valid 96-point prediction.
        """
        try:
            model_preds = {}
            model_weights = {}

            # Query every available model and keep only valid 96-point outputs.
            for name in ('lightgbm', 'xgboost'):
                if not self.is_trained.get(name, False):
                    continue
                raw = self._predict_single_model(features, name)
                # Some models may wrap their output in a dict.
                if isinstance(raw, dict) and 'predictions' in raw:
                    pred = raw['predictions']
                else:
                    pred = raw
                if pred is not None and len(pred) == 96:
                    model_preds[name] = pred
                    model_weights[name] = self.ensemble_weights.get(name, 0)

            if not model_preds:
                logger.warning("没有可用的模型进行ensemble预测")
                return None

            logger.debug(f"参与ensemble的模型: {list(model_preds.keys())}")

            weight_sum = sum(model_weights.values())
            if weight_sum == 0:
                # All weights zero: fall back to a plain average.
                combined = np.mean(list(model_preds.values()), axis=0)
                logger.debug(f"使用简单平均")
            else:
                # Weighted average with weights renormalised to sum to 1.
                combined = np.zeros(96)
                for name, pred in model_preds.items():
                    share = model_weights[name] / weight_sum
                    combined += share * np.array(pred)
                    logger.debug(f"{name}: weight={share}")
                logger.debug(f"加权平均结果")

            return combined.tolist()

        except Exception as e:
            logger.error(f"混合模型预测失败: {e}")
            return None
    
    def _save_prediction_results(self, target_date: str, model_name: str, 
                               predictions: List[float], db: Session):
        """Replace stored predictions for (target_date, model_name) with new ones.

        Existing rows for the same date/model are deleted first, then the 96
        quarter-hour values are bulk-inserted with UTC timestamps.

        Raises:
            Re-raises any database error after rolling back.
        """
        try:
            from datetime import timezone
            
            target_dt = datetime.strptime(target_date, '%Y-%m-%d').date()
            
            # Remove any previous predictions for this date and model.
            db.query(PredResult).filter(
                and_(
                    PredResult.dt == target_dt,
                    PredResult.model == model_name
                )
            ).delete()
            
            # Build the new rows; t_idx is 1-based, each step is 15 minutes.
            records = []
            for t_idx, pred_val in enumerate(predictions, 1):
                hour = (t_idx - 1) // 4
                minute = ((t_idx - 1) % 4) * 15
                # Timestamp is stored as UTC.
                dtm = datetime.combine(target_dt, datetime.min.time().replace(hour=hour, minute=minute)).replace(tzinfo=timezone.utc)
                
                prediction = PredResult(
                    dt=target_dt,
                    t_idx=t_idx,
                    dtm=dtm,
                    model=model_name,
                    pred_val=float(pred_val)
                )
                records.append(prediction)
            
            # Insert all rows in one round trip.
            db.bulk_save_objects(records)
            db.commit()
            
            logger.info(f"成功保存 {len(records)} 条预测结果（UTC时间）")
            
        except Exception as e:
            db.rollback()
            logger.error(f"保存预测结果失败: {e}")
            raise
    
    def _get_weather_info(self, target_date: str) -> Optional[Dict]:
        """Return daily weather values for the date, with layered fallbacks.

        Order: database record -> live forecast service -> hard-coded
        defaults.  Returns None only on unexpected errors.
        """
        try:
            target_dt = datetime.strptime(target_date, '%Y-%m-%d').date()
            
            with get_db_session() as db:
                weather_data = db.query(WeatherDaily).filter(WeatherDaily.dt == target_dt).first()
                
                if weather_data:
                    return {
                        't_max': weather_data.t_max,
                        't_min': weather_data.t_min,
                        'humidity': weather_data.humidity or 60,  # default 60% when missing
                        'precip': weather_data.precip or 0,
                        'pressure': weather_data.pressure or 1013,  # standard sea-level pressure
                        'wind_speed': weather_data.ws_max or 0  # daily max wind speed
                    }
                else:
                    # No DB record: ask the live forecast service.
                    logger.info(f"数据库中没有 {target_date} 的气象数据，尝试从气象预报服务获取")
                    try:
                        from backend.service.weather_forecast_service import WeatherForecastService
                        weather_service = WeatherForecastService()
                        forecast = weather_service.get_weather_forecast_for_prediction(target_dt)
                        
                        if forecast:
                            return {
                                't_max': forecast.get('t_max'),
                                't_min': forecast.get('t_min'),
                                'humidity': forecast.get('humidity', 60),
                                'precip': forecast.get('precip', 0),
                                'pressure': forecast.get('pressure', 1013),
                                'wind_speed': forecast.get('wind_speed', 0)
                            }
                    except Exception as e:
                        logger.warning(f"从气象预报服务获取数据失败: {e}")
                    
                    # Last resort: plausible default values.
                    return {
                        't_max': 20.0,
                        't_min': 10.0,
                        'humidity': 60,
                        'precip': 0,
                        'pressure': 1013,
                        'wind_speed': 5.0
                    }
                    
        except Exception as e:
            logger.error(f"获取气象信息失败: {e}")
            return None
    
    def _get_model_metrics(self, model_name: str) -> Optional[Dict]:
        """Fetch the latest evaluation metrics (MAE/RMSE/MAPE/R2) for a model.

        For 'ensemble' the latest metrics of all member models are averaged
        (the previous implementation documented an average but returned only
        the first member's metrics).  For a single model the most recent
        evaluation record is used.

        Returns:
            Dict with rounded 'mae', 'rmse', 'mape', 'r2' values, or None
            when no evaluation data is available or on any error.
        """
        try:
            with get_db_session() as db:
                if model_name == 'ensemble':
                    # Collect the latest metrics of every member model.
                    metrics_list = []
                    for m in ['lightgbm', 'xgboost']:
                        eval_record = db.query(ModelEval).filter(ModelEval.model == m).order_by(ModelEval.train_dt.desc()).first()
                        if eval_record:
                            # float() also handles Decimal columns directly.
                            metrics_list.append({
                                'mae': float(eval_record.mae),
                                'rmse': float(eval_record.rmse),
                                'mape': float(eval_record.mape),
                                'r2': float(eval_record.r2)
                            })
                    
                    if metrics_list:
                        n = len(metrics_list)
                        # Average across the member models, as documented.
                        return {
                            'mae': round(sum(m['mae'] for m in metrics_list) / n, 2),
                            'rmse': round(sum(m['rmse'] for m in metrics_list) / n, 2),
                            'mape': round(sum(m['mape'] for m in metrics_list) / n, 2),
                            'r2': round(sum(m['r2'] for m in metrics_list) / n, 4)
                        }
                else:
                    eval_record = db.query(ModelEval).filter(ModelEval.model == model_name).order_by(ModelEval.train_dt.desc()).first()
                    if eval_record:
                        return {
                            'mae': round(float(eval_record.mae), 2),
                            'rmse': round(float(eval_record.rmse), 2),
                            'mape': round(float(eval_record.mape), 2),
                            'r2': round(float(eval_record.r2), 4)
                        }
            
            return None
            
        except Exception as e:
            logger.error(f"获取模型指标失败: {e}")
            return None
    
    def predict_multiple_days(self, start_date: str, days: int = 7, model_name: str = 'ensemble') -> Dict:
        """Run predict_single_day for `days` consecutive dates from start_date.

        Returns a summary dict with 'total_days', 'success_days' and per-day
        results keyed 'day_1'..'day_N'; 'success' is True when at least one
        day succeeded.
        """
        try:
            first_day = datetime.strptime(start_date, '%Y-%m-%d').date()

            # Predict each day in order; keys are 1-based day offsets.
            results = {
                f'day_{offset + 1}': self.predict_single_day(
                    (first_day + timedelta(days=offset)).strftime('%Y-%m-%d'),
                    model_name
                )
                for offset in range(days)
            }

            success_count = sum(1 for r in results.values() if r and r.get('success', False))

            return {
                'success': success_count > 0,
                'total_days': days,
                'success_days': success_count,
                'results': results
            }

        except Exception as e:
            logger.error(f"多天预测失败: {e}")
            return {'success': False, 'error': str(e)}
    


# 保留原有的SimplePredictionModel类以兼容现有代码
class SimplePredictionModel:
    """Backward-compatibility wrapper delegating to PredictionService."""

    def __init__(self, model_name: str = "simple_rf"):
        # Underlying service that does the real loading/predicting.
        self.service = PredictionService()
        self.model_name = model_name
        self.is_trained = False

    def train_all_models(self) -> Dict[str, bool]:
        """Train every model via the enhanced trainer and reload the artifacts.

        Returns:
            Mapping of model name -> True when its metrics dict looks valid.
        """
        try:
            from backend.models.enhanced_dual_trainer import EnhancedDualTrainer

            trainer = EnhancedDualTrainer()
            raw_results = trainer.train_all_models_parallel()

            # Pick up the freshly written model files.
            self.service.load_models()

            # A model counts as trained when its metrics dict contains 'mae'.
            return {
                name: isinstance(metrics, dict) and 'mae' in metrics
                for name, metrics in raw_results.items()
            }

        except Exception as e:
            logger.error(f"训练所有模型失败: {e}")
            return {
                'lightgbm': False,
                'xgboost': False,
                'ensemble': False
            }

    def predict_all_models(self, days: int = 7) -> Dict[str, bool]:
        """Run multi-day prediction for every model, mapping name -> success flag."""
        start = datetime.now().strftime('%Y-%m-%d')
        return {
            name: self.service.predict_multiple_days(start, days, name).get('success', False)
            for name in ['lightgbm', 'xgboost', 'ensemble']
        }