"""
集成示例 - 展示如何使用P1优先级任务中实现的模块

这个示例展示了如何集成：
- 事件总线
- ResultAggregator
- WebhookNotificationSystem
- 分布式追踪
- 延迟监控
- Redis/etcd状态存储
"""

import asyncio
import logging
import time
from datetime import datetime, timezone
from typing import Optional

from .event_bus import EventBus, EventType, get_event_bus
from .result_aggregator import ResultAggregator, TaskStatus
from .webhook_notification_system import WebhookNotificationSystem, NotificationConfig
from .distributed_tracing import initialize_tracing, get_task_tracer, TraceConfig
from .latency_monitor import LatencyMonitor
from .redis_state_storage import RedisStateStorage
from .etcd_state_storage import EtcdStateStorage

# Configure module-level logging for the demo (INFO level to stdout).
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)


class GPUAutoScalingSystem:
    """Integration example for the GPU auto-scaling system.

    Wires together the P1-priority modules: event bus, result aggregator,
    webhook notification system, distributed tracing, latency monitoring,
    and a pluggable state-storage backend (Redis or etcd).
    """

    def __init__(self):
        # The event bus singleton is available immediately; every other
        # component needs async setup and is created in initialize().
        self.event_bus = get_event_bus()
        self.result_aggregator = None
        self.webhook_system = None
        self.latency_monitor = None
        self.state_storage = None

        # Task tracer; populated by initialize() when tracing is enabled.
        self.task_tracer = None

        logger.info("GPU自动扩缩容系统已初始化")

    async def initialize(self,
                        use_redis: bool = True,
                        use_jaeger: bool = True) -> None:
        """Initialize all subsystems.

        Args:
            use_redis: Use Redis for state storage when True, etcd otherwise.
            use_jaeger: Enable Jaeger-backed distributed tracing when True.

        Raises:
            Exception: Re-raised from any component that fails to start.
        """
        try:
            # Distributed tracing (optional).
            if use_jaeger:
                trace_config = TraceConfig(
                    service_name="gpu-auto-scaling-system",
                    jaeger_endpoint="http://localhost:14268/api/traces",
                    environment="development"
                )
                initialize_tracing(trace_config)
                self.task_tracer = get_task_tracer()
                logger.info("分布式追踪已初始化")

            # State-storage backend: Redis or etcd.
            if use_redis:
                self.state_storage = RedisStateStorage(
                    redis_url="redis://localhost:6379",
                    key_prefix="gpu_auto_scaling"
                )
                await self.state_storage.connect()
                logger.info("Redis状态存储已初始化")
            else:
                self.state_storage = EtcdStateStorage(
                    host="localhost",
                    port=2379,
                    key_prefix="/gpu_auto_scaling"
                )
                await self.state_storage.connect()
                logger.info("etcd状态存储已初始化")

            # The event bus must be running before dependent components attach.
            await self.event_bus.start()
            logger.info("事件总线已启动")

            # Result aggregator persists task records via the state storage.
            self.result_aggregator = ResultAggregator(
                state_storage=self.state_storage,
                event_bus=self.event_bus
            )
            logger.info("结果聚合器已初始化")

            # Webhook notification system.
            notification_config = NotificationConfig(
                webhook_timeout=10,
                max_retries=3,
                enable_signature_verification=True
            )
            self.webhook_system = WebhookNotificationSystem(
                config=notification_config,
                event_bus=self.event_bus
            )
            logger.info("Webhook通知系统已初始化")

            # Latency monitoring.
            self.latency_monitor = LatencyMonitor(
                event_bus=self.event_bus,
                max_samples=10000,
                time_window_minutes=60
            )
            logger.info("延迟监控已初始化")

            logger.info("系统初始化完成")

        except Exception as e:
            logger.error(f"系统初始化失败: {e}")
            raise

    async def shutdown(self) -> None:
        """Shut down the system: stop the event bus and disconnect storage.

        Errors are logged but not re-raised, so shutdown is best-effort.
        """
        try:
            # Stop the event bus first so no new events are dispatched.
            await self.event_bus.stop()

            # Disconnect the state storage, if one was initialized.
            if self.state_storage:
                await self.state_storage.disconnect()

            logger.info("系统已关闭")

        except Exception as e:
            logger.error(f"系统关闭失败: {e}")

    async def _publish_task_event(self, event_type, data: dict) -> None:
        """Publish a task lifecycle event sourced from the GPU server."""
        await self.event_bus.publish(
            event_type,
            source="gpu_server",
            data=data
        )

    async def submit_task(self,
                         task_id: str,
                         client_ip: str,
                         callback_url: str,
                         correlation_id: Optional[str] = None) -> None:
        """Submit a task: open a trace (when enabled) and create its record.

        Args:
            task_id: Unique identifier of the task.
            client_ip: IP address of the submitting client.
            callback_url: URL to notify when the task finishes.
            correlation_id: Optional correlation id linking related tasks.

        Raises:
            Exception: Re-raised if the task record cannot be created.
        """
        try:
            # Create the task trace only when tracing is enabled.
            if self.task_tracer:
                trace_context = self.task_tracer.create_task_trace(
                    task_id=task_id,
                    operation="submit",
                    attributes={
                        "client_ip": client_ip,
                        "callback_url": callback_url
                    }
                )
            else:
                trace_context = None

            # The aggregator persists the record; its return value is not
            # needed here.
            await self.result_aggregator.create_task_record(
                task_id=task_id,
                client_ip=client_ip,
                correlation_id=correlation_id,
                metadata={
                    "callback_url": callback_url,
                    "trace_context": trace_context
                }
            )

            logger.info(f"任务已提交: {task_id}")

        except Exception as e:
            logger.error(f"提交任务失败: {task_id}, 错误: {e}")
            raise

    async def assign_task(self,
                         task_id: str,
                         server_id: str,
                         server_endpoint: str) -> None:
        """Assign a task to a GPU server, tracing the operation when enabled.

        Raises:
            Exception: Re-raised if the assignment fails.
        """
        try:
            if self.task_tracer:
                # Trace the assignment so the span covers the aggregator call.
                async with self.task_tracer.trace_async_task_operation(
                    task_id=task_id,
                    operation="assign",
                    attributes={
                        "server_id": server_id,
                        "server_endpoint": server_endpoint
                    }
                ) as span:
                    await self.result_aggregator.assign_task(
                        task_id=task_id,
                        server_id=server_id,
                        server_endpoint=server_endpoint
                    )

                    self.task_tracer.add_task_event(
                        span=span,
                        event_name="task_assigned",
                        task_id=task_id,
                        attributes={
                            "server_id": server_id,
                            "server_endpoint": server_endpoint
                        }
                    )
            else:
                await self.result_aggregator.assign_task(
                    task_id=task_id,
                    server_id=server_id,
                    server_endpoint=server_endpoint
                )

            logger.info(f"任务已分配: {task_id} -> {server_id}")

        except Exception as e:
            logger.error(f"分配任务失败: {task_id}, 错误: {e}")
            raise

    async def start_task_processing(self, task_id: str) -> None:
        """Mark a task as processing, tracing the transition when enabled.

        Raises:
            Exception: Re-raised if the state transition fails.
        """
        try:
            if self.task_tracer:
                async with self.task_tracer.trace_async_task_operation(
                    task_id=task_id,
                    operation="start_processing"
                ) as span:
                    await self.result_aggregator.start_task_processing(task_id)

                    self.task_tracer.add_task_event(
                        span=span,
                        event_name="processing_started",
                        task_id=task_id
                    )
            else:
                await self.result_aggregator.start_task_processing(task_id)

            logger.info(f"任务开始处理: {task_id}")

        except Exception as e:
            logger.error(f"开始任务处理失败: {task_id}, 错误: {e}")
            raise

    async def complete_task(self,
                           task_id: str,
                           result_url: str,
                           processing_duration: float) -> None:
        """Publish a TASK_COMPLETED event, tracing the operation when enabled.

        Args:
            task_id: Identifier of the finished task.
            result_url: URL where the task result can be fetched.
            processing_duration: Wall-clock processing time in seconds.

        Raises:
            Exception: Re-raised if publishing the event fails.
        """
        # Same payload is used with or without tracing.
        event_data = {
            "task_id": task_id,
            "result_url": result_url,
            "processing_duration": processing_duration
        }
        try:
            if self.task_tracer:
                async with self.task_tracer.trace_async_task_operation(
                    task_id=task_id,
                    operation="complete",
                    attributes={
                        "result_url": result_url,
                        "processing_duration": processing_duration
                    }
                ) as span:
                    await self._publish_task_event(
                        EventType.TASK_COMPLETED, event_data
                    )

                    self.task_tracer.mark_task_success(
                        span=span,
                        task_id=task_id,
                        result_url=result_url,
                        processing_duration=processing_duration
                    )
            else:
                await self._publish_task_event(
                    EventType.TASK_COMPLETED, event_data
                )

            logger.info(f"任务已完成: {task_id}, 结果URL: {result_url}")

        except Exception as e:
            logger.error(f"完成任务失败: {task_id}, 错误: {e}")
            raise

    async def fail_task(self,
                       task_id: str,
                       error_message: str,
                       processing_duration: Optional[float] = None) -> None:
        """Publish a TASK_FAILED event, tracing the operation when enabled.

        Args:
            task_id: Identifier of the failed task.
            error_message: Human-readable failure description.
            processing_duration: Optional processing time in seconds.

        Raises:
            Exception: Re-raised if publishing the event fails.
        """
        # Same payload is used with or without tracing.
        event_data = {
            "task_id": task_id,
            "error_message": error_message,
            "processing_duration": processing_duration
        }
        try:
            if self.task_tracer:
                async with self.task_tracer.trace_async_task_operation(
                    task_id=task_id,
                    operation="fail",
                    attributes={
                        "error_message": error_message,
                        "processing_duration": processing_duration
                    }
                ) as span:
                    await self._publish_task_event(
                        EventType.TASK_FAILED, event_data
                    )

                    self.task_tracer.mark_task_failure(
                        span=span,
                        task_id=task_id,
                        error_message=error_message,
                        processing_duration=processing_duration
                    )
            else:
                await self._publish_task_event(
                    EventType.TASK_FAILED, event_data
                )

            logger.warning(f"任务失败: {task_id}, 错误: {error_message}")

        except Exception as e:
            logger.error(f"标记任务失败时出错: {task_id}, 错误: {e}")
            raise

    async def get_system_statistics(self) -> dict:
        """Collect statistics from every component.

        Returns:
            A dict with per-component statistics and a UTC timestamp, or an
            empty dict when any component query fails. NOTE(review): this
            assumes initialize() has run; otherwise the component attributes
            are still None and the except branch returns {}.
        """
        try:
            stats = {
                "timestamp": datetime.now(timezone.utc).isoformat(),
                "event_bus": await self.event_bus.get_event_statistics(),
                "result_aggregator": await self.result_aggregator.get_statistics(),
                "webhook_system": await self.webhook_system.get_statistics(),
                "latency_monitor": self.latency_monitor.get_performance_summary(),
                "state_storage": await self.state_storage.get_statistics()
            }

            return stats

        except Exception as e:
            logger.error(f"获取系统统计信息失败: {e}")
            return {}


async def main():
    """主函数 - 演示系统使用"""
    system = GPUAutoScalingSystem()
    
    try:
        # 初始化系统
        await system.initialize(use_redis=True, use_jaeger=True)
        
        # 模拟任务处理流程
        task_id = "task_001"
        client_ip = "192.168.1.100"
        callback_url = "http://client.example.com/webhook"
        server_id = "gpu-server-001"
        server_endpoint = "http://gpu-server-001:8000"
        
        # 1. 提交任务
        await system.submit_task(
            task_id=task_id,
            client_ip=client_ip,
            callback_url=callback_url,
            correlation_id="corr_001"
        )
        
        # 等待一下
        await asyncio.sleep(0.1)
        
        # 2. 分配任务
        await system.assign_task(
            task_id=task_id,
            server_id=server_id,
            server_endpoint=server_endpoint
        )
        
        # 等待一下
        await asyncio.sleep(0.1)
        
        # 3. 开始处理
        await system.start_task_processing(task_id)
        
        # 模拟处理时间
        await asyncio.sleep(2.0)
        
        # 4. 完成任务
        await system.complete_task(
            task_id=task_id,
            result_url=f"http://gpu-server-001:8000/results/{task_id}",
            processing_duration=2.1
        )
        
        # 等待事件处理
        await asyncio.sleep(1.0)
        
        # 获取系统统计信息
        stats = await system.get_system_statistics()
        logger.info(f"系统统计信息: {stats}")
        
        # 等待一下让所有事件处理完成
        await asyncio.sleep(2.0)
        
    except Exception as e:
        logger.error(f"演示过程中出错: {e}")
    finally:
        # 关闭系统
        await system.shutdown()


if __name__ == "__main__":
    asyncio.run(main())
