#!/usr/bin/env python3
"""
增强训练API路由 - 支持LightGBM + XGBoost + 智能集成
"""

from fastapi import APIRouter, HTTPException, BackgroundTasks
from fastapi.responses import JSONResponse
from pydantic import BaseModel
from typing import Dict, Any, Optional, List
import logging
from datetime import datetime
import asyncio
from concurrent.futures import ThreadPoolExecutor

from backend.models.enhanced_dual_trainer import EnhancedDualTrainer

logger = logging.getLogger(__name__)

router = APIRouter()

# Global trainer instance shared by every route in this module.
trainer = EnhancedDualTrainer()

# In-process training state shared between the background task and the
# status endpoints. NOTE(review): module-level mutable state — assumes a
# single worker process; confirm the deployment does not run multiple workers.
training_status = {
    'is_training': False,   # whether a training run is currently in progress
    'current_model': '',    # name of the model currently being trained
    'progress': 0,          # overall progress percentage (0-100)
    'models_status': {      # per-model lifecycle: pending/training/completed/failed
        'lightgbm': 'pending',
        'xgboost': 'pending',
        'ensemble': 'pending'
    },
    'results': {},          # per-model metric dicts, populated as training finishes
    'start_time': None,     # datetime when the current run started, or None
    'logs': []              # chronological list of log entries (see add_training_log)
}

class TrainingConfig(BaseModel):
    """Training request configuration — simplified version with dynamic parameter optimization.

    When ``lightgbm``/``xgboost`` are omitted, range-adaptive defaults from
    ``get_optimal_parameters`` are used instead.
    """
    # Data range to train on (e.g. "3_months"); keys accepted by get_optimal_parameters.
    train_range: str = "3_months"
    
    # Ensemble/run options carried over from the front-end configuration.
    enable_parallel: bool = True
    enable_shap: bool = True
    dynamic_weights: bool = True
    
    # Optional custom hyper-parameters for advanced users (not shown in the UI);
    # when provided they override the range-adaptive defaults.
    lightgbm: Optional[Dict[str, Any]] = None
    xgboost: Optional[Dict[str, Any]] = None
    
    # Models to train/report on.
    models: List[str] = ["lightgbm", "xgboost", "ensemble"]

def get_optimal_parameters(train_range: str, sample_count: Optional[int] = None) -> Dict[str, Dict[str, Any]]:
    """Return tuned LightGBM/XGBoost hyper-parameters for a training data range.

    Longer ranges get higher model capacity (more leaves/estimators, deeper
    trees) and lower learning rates; shorter ranges get stronger regularization
    to curb overfitting on fewer samples. Adjacent ranges that previously had
    copy-pasted identical configs ("1_month"/"3_months" and "6_months"/"1_year")
    now share a single definition.

    Args:
        train_range: One of "1_month", "3_months", "6_months", "1_year",
            "3_years", "2_years", "8_years". Unknown values fall back to the
            "3_months" defaults (a warning is logged).
        sample_count: Currently unused; reserved for sample-size-aware tuning.
            (Annotation fixed: was ``int = None``, now ``Optional[int]``.)

    Returns:
        Dict with keys "lightgbm" and "xgboost", each mapping to the parameter
        dict for the corresponding trainer.
    """
    # Same instance as the module-level logger; resolved locally so the
    # function is self-contained.
    log = logging.getLogger(__name__)

    # Short-term training (1-3 months): small trees, strong regularization.
    short_term = {
        "lightgbm": {
            "objective": "regression",
            "metric": "rmse",
            "num_leaves": 31,
            "learning_rate": 0.05,
            "n_estimators": 300,
            "feature_fraction": 0.9,
            "bagging_fraction": 0.8,
            "reg_alpha": 0.1,
            "reg_lambda": 0.1,
            "max_depth": 6,
            "min_child_samples": 10
        },
        "xgboost": {
            "max_depth": 4,
            "learning_rate": 0.05,
            "n_estimators": 100,
            "subsample": 0.6,
            "colsample_bytree": 0.6,
            "reg_alpha": 0.5,
            "reg_lambda": 0.5,
            "min_child_weight": 10,
            "gamma": 2.0
        }
    }

    # Medium-term training (6 months - 1 year).
    medium_term = {
        "lightgbm": {
            "objective": "regression",
            "metric": "rmse",
            "num_leaves": 50,
            "learning_rate": 0.04,
            "n_estimators": 400,
            "feature_fraction": 0.85,
            "bagging_fraction": 0.8,
            "reg_alpha": 0.08,
            "reg_lambda": 0.08,
            "max_depth": 7,
            "min_child_samples": 15
        },
        "xgboost": {
            "max_depth": 5,
            "learning_rate": 0.04,
            "n_estimators": 200,
            "subsample": 0.7,
            "colsample_bytree": 0.7,
            "reg_alpha": 0.3,
            "reg_lambda": 0.3,
            "min_child_weight": 8,
            "gamma": 1.5
        }
    }

    params_config = {
        "1_month": short_term,
        "3_months": short_term,
        "6_months": medium_term,
        "1_year": medium_term,
        # Mid-long-term training (3 years).
        # NOTE(review): "3_years" has lower capacity than "2_years" below —
        # looks inverted relative to the short→long progression; values kept
        # as-is, confirm intent with the original author.
        "3_years": {
            "lightgbm": {
                "objective": "regression",
                "metric": "rmse",
                "num_leaves": 60,
                "learning_rate": 0.035,
                "n_estimators": 500,
                "feature_fraction": 0.82,
                "bagging_fraction": 0.8,
                "reg_alpha": 0.07,
                "reg_lambda": 0.07,
                "max_depth": 7,
                "min_child_samples": 18
            },
            "xgboost": {
                "max_depth": 5,
                "learning_rate": 0.035,
                "n_estimators": 250,
                "subsample": 0.75,
                "colsample_bytree": 0.75,
                "reg_alpha": 0.25,
                "reg_lambda": 0.25,
                "min_child_weight": 6,
                "gamma": 1.2
            }
        },
        # Long-term training (2 years).
        "2_years": {
            "lightgbm": {
                "objective": "regression",
                "metric": "rmse",
                "num_leaves": 80,
                "learning_rate": 0.03,
                "n_estimators": 600,
                "feature_fraction": 0.8,
                "bagging_fraction": 0.8,
                "reg_alpha": 0.05,
                "reg_lambda": 0.05,
                "max_depth": 8,
                "min_child_samples": 20
            },
            "xgboost": {
                "max_depth": 6,
                "learning_rate": 0.03,
                "n_estimators": 300,
                "subsample": 0.8,
                "colsample_bytree": 0.8,
                "reg_alpha": 0.2,
                "reg_lambda": 0.2,
                "min_child_weight": 5,
                "gamma": 1.0
            }
        },
        # Very-long-term training (8 years): maximum capacity, mild regularization.
        "8_years": {
            "lightgbm": {
                "objective": "regression",
                "metric": "rmse",
                "num_leaves": 100,
                "learning_rate": 0.02,
                "n_estimators": 800,
                "feature_fraction": 0.75,
                "bagging_fraction": 0.8,
                "reg_alpha": 0.03,
                "reg_lambda": 0.03,
                "max_depth": 10,
                "min_child_samples": 30
            },
            "xgboost": {
                "max_depth": 8,
                "learning_rate": 0.02,
                "n_estimators": 500,
                "subsample": 0.85,
                "colsample_bytree": 0.85,
                "reg_alpha": 0.1,
                "reg_lambda": 0.1,
                "min_child_weight": 3,
                "gamma": 0.5
            }
        }
    }

    # Unknown range → fall back to the 3_months defaults.
    if train_range not in params_config:
        log.warning(f"未找到数据范围 {train_range} 的参数配置，使用默认配置")
        train_range = "3_months"

    selected_config = params_config[train_range]
    log.info(f"为数据范围 {train_range} 选择了优化参数配置")

    return selected_config

def add_training_log(message: str, model: str = None, log_type: str = "info"):
    """Append an entry to the in-memory training log and mirror it to the module logger.

    Each entry records a wall-clock timestamp, the message, the model it
    relates to (or None for run-level events) and a severity/type tag.
    """
    entry = dict(
        time=datetime.now().strftime('%H:%M:%S'),
        message=message,
        model=model,
        type=log_type,
    )
    training_status['logs'].append(entry)
    logger.info(f"训练日志: {message}")

def update_training_status(model: str, status: str, progress: int = None):
    """Record a new lifecycle status for *model* and optionally bump overall progress.

    Also marks *model* as the currently active one and mirrors the change
    into the training log stream.
    """
    training_status['current_model'] = model
    training_status['models_status'][model] = status
    if progress is not None:
        training_status['progress'] = progress

    # Surface the state transition in the user-visible log.
    add_training_log(f"{model.upper()}模型状态更新: {status}", model, "info")

async def train_models_background(config: TrainingConfig):
    """Background task that trains LightGBM + XGBoost and assembles the ensemble.

    Progress, per-model status and logs are written into the module-level
    ``training_status`` dict so the status endpoints can report live state.
    Runs detached from any request, so exceptions are caught and recorded
    rather than propagated.
    """
    try:
        training_status['is_training'] = True
        training_status['start_time'] = datetime.now()
        training_status['logs'] = []
        
        # Resolve range-adaptive hyper-parameters for the selected data range.
        optimal_params = get_optimal_parameters(config.train_range)
        
        # User-supplied custom parameters (advanced use) take precedence over
        # the range-optimized defaults.
        final_lightgbm_params = config.lightgbm if config.lightgbm else optimal_params['lightgbm']
        final_xgboost_params = config.xgboost if config.xgboost else optimal_params['xgboost']
        
        # Echo the effective training configuration to the console log.
        logger.info("=" * 80)
        logger.info("🚀 增强双模型训练开始")
        logger.info("=" * 80)
        logger.info(f"📅 训练数据范围: {config.train_range}")
        logger.info(f"🎯 训练模型列表: {config.models}")
        logger.info(f"⚡ 启用并行训练: {config.enable_parallel}")
        logger.info(f"🔍 启用SHAP分析: {config.enable_shap}")
        logger.info(f"🎛️ 动态权重调整: {config.dynamic_weights}")
        logger.info(f"🤖 参数优化策略: {'自定义参数' if config.lightgbm or config.xgboost else '数据范围自适应'}")
        logger.info("-" * 50)
        logger.info("📊 LightGBM 参数:")
        for key, value in final_lightgbm_params.items():
            logger.info(f"   {key}: {value}")
        logger.info("-" * 50)
        logger.info("📊 XGBoost 参数:")
        for key, value in final_xgboost_params.items():
            logger.info(f"   {key}: {value}")
        logger.info("=" * 80)
        
        add_training_log("开始增强双模型训练", None, "info")
        add_training_log(f"使用数据范围: {config.train_range}", None, "info")
        
        # Push the resolved parameters into the shared trainer instance.
        trainer.update_model_config('lightgbm', final_lightgbm_params)
        add_training_log("LightGBM配置已更新（自适应优化）", 'lightgbm', "info")
        
        trainer.update_model_config('xgboost', final_xgboost_params)
        add_training_log("XGBoost配置已更新（自适应优化）", 'xgboost', "info")
        
        # Kick off training: both base models start in "training" state.
        update_training_status('lightgbm', 'training', 10)
        update_training_status('xgboost', 'training', 10)
        
        if config.enable_parallel:
            add_training_log("启用并行训练模式", None, "info")
            # Parallel path: the trainer handles data loading, both base models
            # and (presumably) the ensemble internally — TODO confirm against
            # EnhancedDualTrainer.train_all_models_parallel.
            results = trainer.train_all_models_parallel(config.train_range)
        else:
            add_training_log("启用串行训练模式", None, "info")
            # Serial path: load data once, split 80/20, then train each model
            # in turn with manual progress updates.
            X, y = trainer.load_data(config.train_range)
            from sklearn.model_selection import train_test_split
            X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)
            
            results = {}
            training_start_time = datetime.now()
            
            # Train LightGBM.
            update_training_status('lightgbm', 'training', 25)
            results['lightgbm'] = trainer.train_lightgbm(X_train, y_train, X_test, y_test, training_start_time)
            update_training_status('lightgbm', 'completed', 50)
            
            # Train XGBoost.
            update_training_status('xgboost', 'training', 60)
            results['xgboost'] = trainer.train_xgboost(X_train, y_train, X_test, y_test, training_start_time)
            update_training_status('xgboost', 'completed', 80)
        
        # Publish results so the status endpoints can see them.
        training_status['results'] = results
        
        # Ensemble stage.
        update_training_status('ensemble', 'training', 90)
        
        # The parallel trainer may have produced the ensemble already; a
        # positive MAE is treated here as "successfully trained".
        if 'ensemble' in results and results['ensemble'].get('mae', 0) > 0:
            update_training_status('ensemble', 'completed', 100)
            add_training_log("集成模型配置完成", 'ensemble', "success")
        elif all(model_result.get('mae', 0) > 0 for model_result in results.values() if isinstance(model_result, dict)):
            # All base models succeeded but no ensemble result exists (serial
            # path): synthesize one as a weighted average of base-model metrics.
            try:
                dynamic_weights = trainer._calculate_dynamic_weights(results)
                ensemble_result = {
                    'mae': sum(results[model]['mae'] * weight for model, weight in dynamic_weights.items()),
                    'rmse': sum(results[model]['rmse'] * weight for model, weight in dynamic_weights.items()),
                    'r2': sum(results[model]['r2'] * weight for model, weight in dynamic_weights.items()),
                    'mape': sum(results[model]['mape'] * weight for model, weight in dynamic_weights.items()),
                    'weights': dynamic_weights,
                    'training_time': sum(results[model].get('training_time', 0) for model in results.keys())
                }
                results['ensemble'] = ensemble_result
                update_training_status('ensemble', 'completed', 100)
                add_training_log("集成模型配置完成", 'ensemble', "success")
            except Exception as e:
                logger.error(f"创建集成模型失败: {e}")
                update_training_status('ensemble', 'failed', 100)
                add_training_log("集成模型配置失败", 'ensemble', "error")
        else:
            update_training_status('ensemble', 'failed', 100)
            add_training_log("集成模型配置失败：基础模型训练不成功", 'ensemble', "error")
        
        # Mark the run finished.
        training_status['is_training'] = False
        training_status['current_model'] = ''
        training_status['progress'] = 100
        
        total_time = (datetime.now() - training_status['start_time']).total_seconds()
        add_training_log(f"训练完成，总耗时: {total_time:.2f}秒", None, "success")
        
        # Optional SHAP interpretability report attached to each base model.
        # NOTE(review): this runs after the run is already flagged complete,
        # so status consumers may briefly see results without interpretability.
        if config.enable_shap:
            add_training_log("开始生成SHAP可解释性分析报告", None, "info")
            for model_name in ['lightgbm', 'xgboost']:
                if model_name in results and results[model_name].get('mae', 0) > 0:
                    interpretability = trainer.get_model_interpretability(model_name)
                    if interpretability:
                        results[model_name]['interpretability'] = interpretability
                        add_training_log(f"{model_name.upper()}可解释性分析完成", model_name, "success")
        
        logger.info("增强双模型训练完成")
        
    except Exception as e:
        logger.error(f"训练过程异常: {e}")
        training_status['is_training'] = False
        training_status['current_model'] = ''
        add_training_log(f"训练异常: {str(e)}", None, "error")
        
        # Flip any model still marked "training" to "failed".
        for model in training_status['models_status']:
            if training_status['models_status'][model] == 'training':
                training_status['models_status'][model] = 'failed'

@router.post("/train/start")
async def start_training(config: TrainingConfig, background_tasks: BackgroundTasks):
    """开始训练"""
    try:
        if training_status['is_training']:
            raise HTTPException(status_code=400, detail="训练正在进行中")
        
        # 重置状态
        training_status['models_status'] = {
            'lightgbm': 'pending',
            'xgboost': 'pending',
            'ensemble': 'pending'
        }
        training_status['results'] = {}
        training_status['progress'] = 0
        
        # 启动后台训练任务
        background_tasks.add_task(train_models_background, config)
        
        return {
            'success': True,
            'message': '训练任务已启动',
            'config': config.dict(),
            'estimated_time': '预计5-10分钟'
        }
        
    except Exception as e:
        logger.error(f"启动训练失败: {e}")
        return JSONResponse(
            status_code=500,
            content={
                'success': False,
                'error': str(e)
            }
        )

@router.get("/train/status")
async def get_training_status():
    """获取训练状态"""
    try:
        return {
            'success': True,
            'is_training': training_status['is_training'],
            'current_model': training_status['current_model'],
            'progress': training_status['progress'],
            'models_status': training_status['models_status'],
            'results': training_status['results'],
            'logs': training_status['logs'][-50:],  # 最近50条日志
            'start_time': training_status['start_time'].isoformat() if training_status['start_time'] else None
        }
        
    except Exception as e:
        logger.error(f"获取训练状态失败: {e}")
        return JSONResponse(
            status_code=500,
            content={
                'success': False,
                'error': str(e)
            }
        )

@router.get("/train/results")
async def get_training_results():
    """获取训练结果"""
    try:
        return {
            'success': True,
            'results': training_status['results'],
            'summary': {
                'total_models': len(training_status['models_status']),
                'completed_models': len([s for s in training_status['models_status'].values() if s == 'completed']),
                'training_time': (datetime.now() - training_status['start_time']).total_seconds() if training_status['start_time'] else 0
            }
        }
        
    except Exception as e:
        logger.error(f"获取训练结果失败: {e}")
        return JSONResponse(
            status_code=500,
            content={
                'success': False,
                'error': str(e)
            }
        )

@router.get("/train/interpretability/{model_name}")
async def get_model_interpretability(model_name: str):
    """获取模型可解释性分析"""
    try:
        if model_name not in ['lightgbm', 'xgboost']:
            raise HTTPException(status_code=400, detail="不支持的模型")
        
        interpretability = trainer.get_model_interpretability(model_name)
        
        return {
            'success': True,
            'model': model_name,
            'interpretability': interpretability
        }
        
    except Exception as e:
        logger.error(f"获取可解释性分析失败: {e}")
        return JSONResponse(
            status_code=500,
            content={
                'success': False,
                'error': str(e)
            }
        )

@router.post("/train/stop")
async def stop_training():
    """停止训练"""
    try:
        training_status['is_training'] = False
        training_status['current_model'] = ''
        add_training_log("训练已被用户停止", None, "warning")
        
        return {
            'success': True,
            'message': '训练已停止'
        }
        
    except Exception as e:
        logger.error(f"停止训练失败: {e}")
        return JSONResponse(
            status_code=500,
            content={
                'success': False,
                'error': str(e)
            }
        )

@router.get("/models/status")
async def get_models_status():
    """获取模型状态"""
    try:
        return {
            'success': True,
            'models': [
                {
                    'key': 'lightgbm',
                    'name': 'LightGBM',
                    'status': training_status['models_status']['lightgbm'],
                    'description': '轻量级梯度提升机，训练速度快，内存占用低'
                },
                {
                    'key': 'xgboost',
                    'name': 'XGBoost',
                    'status': training_status['models_status']['xgboost'],
                    'description': '极端梯度提升，鲁棒性强，特征工程友好'
                },
                {
                    'key': 'ensemble',
                    'name': '智能集成模型',
                    'status': training_status['models_status']['ensemble'],
                    'description': '动态权重集成，结合两个模型的优势'
                }
            ]
        }
        
    except Exception as e:
        logger.error(f"获取模型状态失败: {e}")
        return JSONResponse(
            status_code=500,
            content={
                'success': False,
                'error': str(e)
            }
        )