#!/usr/bin/env python3
"""
双模型训练器 - 只使用LightGBM和XGBoost
基于比利时电力负荷预测，优化后的稳定版本
"""

import pandas as pd
import numpy as np
from datetime import datetime, date, timedelta
from typing import Dict, List, Optional, Tuple, Any
import logging
import os
import pickle
import json
from pathlib import Path

# Machine-learning models
import lightgbm as lgb
import xgboost as xgb
from sklearn.preprocessing import StandardScaler
from sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score
from sklearn.model_selection import train_test_split

# Database-related imports
from sqlalchemy.orm import Session
from sqlalchemy import text, and_

from backend.config.database import get_db_session
from backend.entities.load_data_new import LoadData
from backend.entities.weather_daily import WeatherDaily
from backend.entities.holiday import HolidayInfo
from backend.entities.model_training_log import ModelTrainingLog
from backend.entities.model_eval import ModelEval
from backend.utils.holiday_rule_engine import is_holiday

logger = logging.getLogger(__name__)

class DualModelTrainer:
    """Dual-model trainer that uses only LightGBM and XGBoost.

    Builds one feature vector per day (7 days of historical load, daily
    weather, calendar information), trains both gradient-boosting models,
    persists the fitted models/scalers to disk and records training and
    evaluation results in the database.
    """

    def __init__(self):
        # Fitted model objects keyed by model name ('lightgbm' / 'xgboost').
        self.models = {}
        # One StandardScaler per model, keyed by model name.
        self.scalers = {}
        # Directory where pickled models and the ensemble config are stored.
        self.model_dir = Path("backend/models/saved_models")
        self.model_dir.mkdir(parents=True, exist_ok=True)

        # LightGBM hyperparameters.
        self.lightgbm_config = {
            'objective': 'regression',
            'metric': 'rmse',
            'boosting_type': 'gbdt',
            'num_leaves': 31,
            'learning_rate': 0.05,
            'feature_fraction': 0.9,
            'bagging_fraction': 0.8,
            'bagging_freq': 5,
            'verbose': -1,
            'random_state': 42
        }

        # XGBoost hyperparameters (scikit-learn API).
        self.xgboost_config = {
            'objective': 'reg:squarederror',
            'eval_metric': 'rmse',
            'max_depth': 6,
            'learning_rate': 0.05,
            'n_estimators': 1000,
            'subsample': 0.8,
            'colsample_bytree': 0.9,
            'random_state': 42
        }

        # Fixed blending weights used when combining the two models' outputs.
        self.ensemble_weights = {
            'lightgbm': 0.6,
            'xgboost': 0.4
        }

        logger.info("双模型训练器初始化完成")

    def load_data(self, train_range: str = "3_months") -> Tuple[np.ndarray, np.ndarray]:
        """Load the training matrix for the requested date range.

        Args:
            train_range: "1_month", "3_months" or "6_months"; any other
                value falls back to 90 days.

        Returns:
            Tuple ``(X, y)``: ``X`` has shape (n_days, n_features) and ``y``
            has shape (n_days, 96) — the 96 quarter-hour load values per day.

        Raises:
            ValueError: If no day in the range yields a complete sample.
        """
        try:
            # Resolve the date window; yesterday is the last usable day.
            end_date = date.today() - timedelta(days=1)
            if train_range == "1_month":
                start_date = end_date - timedelta(days=30)
            elif train_range == "3_months":
                start_date = end_date - timedelta(days=90)
            elif train_range == "6_months":
                start_date = end_date - timedelta(days=180)
            else:
                start_date = end_date - timedelta(days=90)

            logger.info(f"加载训练数据: {start_date} 到 {end_date}")

            features = []
            labels = []

            with get_db_session() as db:
                # Skip the first 7 days of the window: they only serve as
                # lag history for the first real sample.
                current_date = start_date + timedelta(days=7)

                while current_date <= end_date:
                    # Feature vector for the day.
                    feature_vector = self._prepare_daily_features(current_date, db)

                    # Labels for the day (96 quarter-hour load values).
                    daily_labels = self._get_daily_labels(current_date, db)

                    if feature_vector is not None and daily_labels is not None:
                        features.append(feature_vector)
                        labels.append(daily_labels)

                    current_date += timedelta(days=1)

            if not features:
                raise ValueError("没有找到有效的训练数据")

            X = np.array(features)
            y = np.array(labels)

            logger.info(f"数据加载完成: X.shape={X.shape}, y.shape={y.shape}")

            return X, y

        except Exception as e:
            logger.error(f"加载数据失败: {e}")
            raise

    def _prepare_daily_features(self, target_date: date, db: Session) -> Optional[np.ndarray]:
        """Build the feature vector for one day.

        Layout: 672 historical-load values (7 days x 96 points), then 4
        weather values, then 9 calendar values — 685 features in total.

        Returns:
            1-D float array, or ``None`` if feature assembly raised.
        """
        try:
            feature_vector = []

            # 1. Historical load features (7 days x 96 points = 672 dims).
            historical_loads = []
            for i in range(7):
                hist_date = target_date - timedelta(days=i + 1)
                daily_loads = self._get_daily_loads(hist_date, db)
                if daily_loads is not None:
                    historical_loads.extend(daily_loads)
                else:
                    # Missing day: fill with a flat typical-load constant
                    # (9000 MW — presumably the system's average; TODO confirm).
                    historical_loads.extend([9000.0] * 96)

            feature_vector.extend(historical_loads)

            # 2. Weather features (4 dims), with neutral defaults when the
            # day's record or an individual column is missing.
            weather_data = db.query(WeatherDaily).filter(WeatherDaily.dt == target_date).first()
            if weather_data:
                weather_features = [
                    weather_data.t_max or 20.0,
                    weather_data.t_min or 10.0,
                    weather_data.precip or 0.0,
                    weather_data.humidity or 60.0
                ]
            else:
                weather_features = [20.0, 10.0, 0.0, 60.0]

            feature_vector.extend(weather_features)

            # 3. Calendar features (9 dims): weekday one-hot (7) +
            # normalized month (1) + holiday flag (1).
            weekday = target_date.weekday()
            month = target_date.month
            is_holiday_flag = is_holiday(target_date, db)

            time_features = [
                1.0 if weekday == i else 0.0 for i in range(7)  # weekday one-hot
            ] + [
                month / 12.0,  # month normalized to (0, 1]
                1.0 if is_holiday_flag else 0.0  # holiday indicator
            ]

            feature_vector.extend(time_features)

            return np.array(feature_vector)

        except Exception as e:
            logger.error(f"准备特征失败 {target_date}: {e}")
            return None

    def _get_daily_loads(self, target_date: date, db: Session) -> Optional[List[float]]:
        """Fetch one day's load curve ordered by time index.

        Returns:
            Exactly 96 load values, or ``None`` when the day is incomplete
            (any other record count) or the query failed.
        """
        try:
            load_records = db.query(LoadData).filter(
                LoadData.dt == target_date
            ).order_by(LoadData.t_idx).all()

            if len(load_records) == 96:
                return [record.load_val for record in load_records]
            else:
                return None

        except Exception as e:
            logger.error(f"获取负荷数据失败 {target_date}: {e}")
            return None

    def _get_daily_labels(self, target_date: date, db: Session) -> Optional[List[float]]:
        """Labels are simply the day's 96 observed load values."""
        return self._get_daily_loads(target_date, db)

    def train_lightgbm(self, X_train: np.ndarray, y_train: np.ndarray,
                      X_test: np.ndarray, y_test: np.ndarray,
                      train_range: str = "3_months",
                      training_start_time: Optional[datetime] = None) -> Dict[str, Any]:
        """Train the LightGBM model, persist it, and log the run.

        Returns:
            Metrics dict (mae/rmse/r2/mape plus train_* and
            overfitting_score); zeroed metrics on any failure.
        """
        try:
            logger.info("开始训练LightGBM模型...")

            if training_start_time is None:
                training_start_time = datetime.now()

            # Standardize features; the scaler is persisted with the model.
            scaler = StandardScaler()
            X_train_scaled = scaler.fit_transform(X_train)
            X_test_scaled = scaler.transform(X_test)

            # NOTE(review): lgb.Dataset does not accept 2-D labels; if
            # y_train has shape (n, 96) this raises and the except path
            # below returns zeroed metrics — confirm the intended label
            # shape with the caller.
            train_data = lgb.Dataset(X_train_scaled, label=y_train)
            valid_data = lgb.Dataset(X_test_scaled, label=y_test, reference=train_data)

            # Train with early stopping on the validation set.
            model = lgb.train(
                self.lightgbm_config,
                train_data,
                valid_sets=[valid_data],
                num_boost_round=1000,
                callbacks=[lgb.early_stopping(stopping_rounds=100), lgb.log_evaluation(0)]
            )

            train_pred = model.predict(X_train_scaled)
            test_pred = model.predict(X_test_scaled)

            # NOTE(review): a 1-D prediction is tiled across all 96 time
            # points, i.e. a flat daily curve — placeholder behaviour kept
            # as-is; a per-timepoint model would be needed for real curves.
            if train_pred.ndim == 1:
                train_pred = np.tile(train_pred.reshape(-1, 1), (1, 96))
            if test_pred.ndim == 1:
                test_pred = np.tile(test_pred.reshape(-1, 1), (1, 96))

            metrics = self._calculate_metrics(y_train, train_pred, y_test, test_pred)
            metrics['training_time'] = (datetime.now() - training_start_time).total_seconds()

            # Persist model + scaler + config + metrics together so the
            # predictor can reload a self-contained artifact.
            model_data = {
                'model': model,
                'scaler': scaler,
                'config': self.lightgbm_config,
                'metrics': metrics,
                'training_time': datetime.now().isoformat()
            }

            model_path = self.model_dir / 'lightgbm_model.pkl'
            with open(model_path, 'wb') as f:
                pickle.dump(model_data, f)

            self.models['lightgbm'] = model
            self.scalers['lightgbm'] = scaler

            # Record the run in the database.
            self._save_training_log('lightgbm', metrics, train_range, training_start_time)

            logger.info(f"LightGBM训练完成: MAE={metrics['mae']:.4f}, RMSE={metrics['rmse']:.4f}")

            return metrics

        except Exception as e:
            logger.error(f"LightGBM训练失败: {e}")
            # Deliberate best-effort: a failed model yields zeroed metrics
            # instead of aborting the whole training pipeline.
            return {'mae': 0.0, 'rmse': 0.0, 'r2': 0.0, 'mape': 0.0}

    def train_xgboost(self, X_train: np.ndarray, y_train: np.ndarray,
                     X_test: np.ndarray, y_test: np.ndarray,
                     train_range: str = "3_months",
                     training_start_time: Optional[datetime] = None) -> Dict[str, Any]:
        """Train the XGBoost model, persist it, and log the run.

        Returns:
            Metrics dict (mae/rmse/r2/mape plus train_* and
            overfitting_score); zeroed metrics on any failure.
        """
        try:
            logger.info("开始训练XGBoost模型...")

            if training_start_time is None:
                training_start_time = datetime.now()

            # Standardize features; the scaler is persisted with the model.
            scaler = StandardScaler()
            X_train_scaled = scaler.fit_transform(X_train)
            X_test_scaled = scaler.transform(X_test)

            # early_stopping_rounds must be a constructor argument: the
            # fit() keyword was deprecated in XGBoost 1.6 and removed in 2.0.
            model = xgb.XGBRegressor(**self.xgboost_config, early_stopping_rounds=100)

            # Early stopping monitors the held-out test set.
            model.fit(
                X_train_scaled, y_train,
                eval_set=[(X_test_scaled, y_test)],
                verbose=False
            )

            train_pred = model.predict(X_train_scaled)
            test_pred = model.predict(X_test_scaled)

            # NOTE(review): same tiling placeholder as in train_lightgbm —
            # a 1-D prediction becomes a flat 96-point daily curve.
            if train_pred.ndim == 1:
                train_pred = np.tile(train_pred.reshape(-1, 1), (1, 96))
            if test_pred.ndim == 1:
                test_pred = np.tile(test_pred.reshape(-1, 1), (1, 96))

            metrics = self._calculate_metrics(y_train, train_pred, y_test, test_pred)
            metrics['training_time'] = (datetime.now() - training_start_time).total_seconds()

            # Persist model + scaler + config + metrics together.
            model_data = {
                'model': model,
                'scaler': scaler,
                'config': self.xgboost_config,
                'metrics': metrics,
                'training_time': datetime.now().isoformat()
            }

            model_path = self.model_dir / 'xgboost_model.pkl'
            with open(model_path, 'wb') as f:
                pickle.dump(model_data, f)

            self.models['xgboost'] = model
            self.scalers['xgboost'] = scaler

            # Record the run in the database.
            self._save_training_log('xgboost', metrics, train_range, training_start_time)

            logger.info(f"XGBoost训练完成: MAE={metrics['mae']:.4f}, RMSE={metrics['rmse']:.4f}")

            return metrics

        except Exception as e:
            logger.error(f"XGBoost训练失败: {e}")
            # Deliberate best-effort: see train_lightgbm.
            return {'mae': 0.0, 'rmse': 0.0, 'r2': 0.0, 'mape': 0.0}

    def train_all_models(self, train_range: str = "3_months") -> Dict[str, Dict[str, Any]]:
        """Load data, train both models, and write the ensemble config.

        Returns:
            Mapping of model name to its metrics dict; empty dict if data
            loading or splitting failed.
        """
        try:
            logger.info("开始训练所有模型...")

            X, y = self.load_data(train_range)

            # Hold out 20% of the days for evaluation.
            X_train, X_test, y_train, y_test = train_test_split(
                X, y, test_size=0.2, random_state=42
            )

            logger.info(f"训练集: {X_train.shape}, 测试集: {X_test.shape}")

            results = {}

            # Each model gets its own start timestamp so its recorded
            # training_time does not include the other model's wall time.
            results['lightgbm'] = self.train_lightgbm(
                X_train, y_train, X_test, y_test, train_range, datetime.now()
            )

            results['xgboost'] = self.train_xgboost(
                X_train, y_train, X_test, y_test, train_range, datetime.now()
            )

            # Persist the ensemble configuration for the predictor.
            ensemble_config = {
                'weights': self.ensemble_weights,
                'models': list(results.keys()),
                'training_time': datetime.now().isoformat(),
                'data_range': train_range
            }

            config_path = self.model_dir / 'ensemble_config.json'
            with open(config_path, 'w') as f:
                json.dump(ensemble_config, f, indent=2)

            logger.info("所有模型训练完成")

            return results

        except Exception as e:
            logger.error(f"训练所有模型失败: {e}")
            return {}

    def _calculate_metrics(self, y_train: np.ndarray, train_pred: np.ndarray,
                          y_test: np.ndarray, test_pred: np.ndarray) -> Dict[str, float]:
        """Compute MAE/RMSE/R²/MAPE on train and test sets.

        Arrays are flattened so the metrics cover all 96 time points of
        every day jointly. Returns zeroed core metrics on failure.
        """
        try:
            # Test-set metrics.
            test_mae = mean_absolute_error(y_test.flatten(), test_pred.flatten())
            test_rmse = np.sqrt(mean_squared_error(y_test.flatten(), test_pred.flatten()))
            test_r2 = r2_score(y_test.flatten(), test_pred.flatten())
            # abs() in the denominator keeps MAPE well-defined for
            # zero/negative actuals; epsilon guards exact zeros.
            test_mape = np.mean(np.abs((y_test.flatten() - test_pred.flatten()) / (np.abs(y_test.flatten()) + 1e-8))) * 100

            # Train-set metrics.
            train_mae = mean_absolute_error(y_train.flatten(), train_pred.flatten())
            train_rmse = np.sqrt(mean_squared_error(y_train.flatten(), train_pred.flatten()))
            train_r2 = r2_score(y_train.flatten(), train_pred.flatten())
            train_mape = np.mean(np.abs((y_train.flatten() - train_pred.flatten()) / (np.abs(y_train.flatten()) + 1e-8))) * 100

            # Relative train/test MAE gap as a crude overfitting indicator.
            overfitting_score = abs(train_mae - test_mae) / test_mae if test_mae > 0 else 0.0

            return {
                'mae': float(test_mae),
                'rmse': float(test_rmse),
                'r2': float(test_r2),
                'mape': float(test_mape),
                'train_mae': float(train_mae),
                'train_rmse': float(train_rmse),
                'train_r2': float(train_r2),
                'train_mape': float(train_mape),
                'overfitting_score': float(overfitting_score)
            }

        except Exception as e:
            logger.error(f"计算指标失败: {e}")
            return {'mae': 0.0, 'rmse': 0.0, 'r2': 0.0, 'mape': 0.0}

    def _save_training_log(self, model_name: str, metrics: Dict[str, float],
                          train_range: str, training_start_time: datetime):
        """Persist a training-log row and an evaluation row to the database.

        Failures are logged but never propagated: losing a log entry must
        not fail a successful training run.
        """
        try:
            with get_db_session() as db:
                training_log = ModelTrainingLog(
                    model=model_name,
                    train_dt=training_start_time.date(),
                    train_start_time=training_start_time,
                    train_end_time=datetime.now(),
                    epoch=1000,  # maximum boosting rounds, not actual rounds
                    train_mae=metrics.get('train_mae', 0.0),
                    test_mae=metrics.get('mae', 0.0),
                    train_rmse=metrics.get('train_rmse', 0.0),
                    test_rmse=metrics.get('rmse', 0.0),
                    train_r2=metrics.get('train_r2', 0.0),
                    test_r2=metrics.get('r2', 0.0),
                    learning_rate=0.05,
                    status='completed',
                    message=f'{model_name}模型训练完成'
                )

                db.add(training_log)

                # Evaluation summary row. Sample/feature counts are static
                # estimates, not measured from the actual split.
                model_eval = ModelEval(
                    model=model_name,
                    train_dt=training_start_time.date(),
                    mae=metrics.get('mae', 0.0),
                    rmse=metrics.get('rmse', 0.0),
                    mape=metrics.get('mape', 0.0),
                    r2=metrics.get('r2', 0.0),
                    train_samples=1000,  # estimate
                    test_samples=250,    # estimate
                    features_count=685,  # 672 load + 4 weather + 9 calendar
                    training_time=metrics.get('training_time', 0.0),
                    data_range=train_range
                )

                db.add(model_eval)
                db.commit()

                logger.info(f"训练日志已保存: {model_name}")

        except Exception as e:
            logger.error(f"保存训练日志失败: {e}")

if __name__ == "__main__":
    # 测试训练器
    trainer = DualModelTrainer()
    results = trainer.train_all_models("1_month")
    
    print("训练结果:")
    for model_name, metrics in results.items():
        print(f"{model_name}: MAE={metrics['mae']:.4f}, RMSE={metrics['rmse']:.4f}, R²={metrics['r2']:.4f}")