"""
并行协调器模块

实现并行执行多个智能体的协调策略
"""

import asyncio
import time
from dataclasses import dataclass
from datetime import datetime
from typing import Any, Dict, List, Optional, Set

from agents.base_agent import BaseAgent
from schema import Task, TaskStatus, Result
from .base_coordinator import BaseCoordinator


@dataclass
class ParallelExecutionResult:
    """Outcome of one agent's execution within a parallel run.

    Attributes:
        agent_id: Identifier of the agent that ran.
        agent_name: Human-readable agent name.
        result: The agent's Result on success; None when execution failed
            or timed out (forward-ref annotation; see failure paths in
            ParallelCoordinator).
        execution_time: Wall-clock seconds spent, including retries.
        success: Whether the agent completed successfully.
        error_message: Last error seen when success is False, else None.
    """
    agent_id: str
    agent_name: str
    result: Optional["Result"]
    execution_time: float
    success: bool
    error_message: Optional[str] = None


class ParallelCoordinator(BaseCoordinator):
    """
    Parallel coordinator.

    Runs every registered agent against the same task concurrently —
    suitable for work that independent agents can process in parallel.
    Concurrency is capped by a semaphore; each agent execution gets a
    per-attempt timeout and a configurable number of retries with
    linearly increasing backoff.
    """

    # Maximum number of records kept in the in-memory execution history.
    _HISTORY_LIMIT = 100

    def __init__(self, coordinator_id: str, name: str, max_workers: int = 4,
                 timeout: int = 300, retry_attempts: int = 2):
        """
        Initialize the parallel coordinator.

        Args:
            coordinator_id: Coordinator identifier.
            name: Human-readable coordinator name.
            max_workers: Maximum number of agents executing at once.
            timeout: Per-attempt timeout in seconds.
            retry_attempts: Number of retries after the first failed attempt.
        """
        super().__init__(coordinator_id, name)

        self.max_workers = max_workers
        self.timeout = timeout
        self.retry_attempts = retry_attempts
        # Caps concurrent agent executions; rebuilt by set_max_workers().
        self._execution_semaphore = asyncio.Semaphore(max_workers)
        # NOTE(review): never populated anywhere in this class, so
        # get_status() always reports 0 active tasks — confirm intent.
        self._active_tasks: Set[str] = set()
        self._execution_history: List[Dict[str, Any]] = []

    async def coordinate(self, task: Task) -> Result:
        """
        Coordinate one task across all registered agents in parallel.

        Args:
            task: Task to execute.

        Returns:
            Aggregated Result; FAILED when no agents are registered or an
            unexpected error escapes the execution pipeline.
        """
        start_time = time.time()

        try:
            task.update_status(TaskStatus.IN_PROGRESS)

            # Fail fast when there is nothing to run.
            if not self.agents:
                return Result(
                    status=TaskStatus.FAILED,
                    data=None,
                    error_message="没有可用的智能体",
                    metadata={
                        "coordinator_id": self.coordinator_id,
                        "execution_time": time.time() - start_time
                    }
                )

            execution_results = await self._execute_parallel(task)
            final_result = self._aggregate_results(execution_results, task)
            self._record_execution(task, execution_results, final_result,
                                   time.time() - start_time)
            return final_result

        except Exception as e:
            # Last-resort guard: surface the error as a FAILED result
            # instead of letting it propagate to the caller.
            error_result = Result(
                status=TaskStatus.FAILED,
                data=None,
                error_message=f"并行协调执行失败: {str(e)}",
                metadata={
                    "coordinator_id": self.coordinator_id,
                    "execution_time": time.time() - start_time
                }
            )
            self._record_execution(task, [], error_result, time.time() - start_time)
            return error_result

    async def _execute_parallel(self, task: Task) -> List[ParallelExecutionResult]:
        """
        Run every registered agent concurrently and collect their results.

        Args:
            task: Task to execute.

        Returns:
            One ParallelExecutionResult per registered agent, in the
            agents' registration order.
        """
        # Capture the id order once, alongside the coroutine order, so
        # gather() output can be matched back to the right agent.
        agent_ids = list(self.agents.keys())
        coros = [
            self._execute_agent_with_retry(agent, task, agent_id, role)
            for agent_id, (agent, role) in self.agents.items()
        ]

        raw_results = await asyncio.gather(*coros, return_exceptions=True)
        return self._collect_results(agent_ids, list(raw_results))

    def _collect_results(self, agent_ids: List[str],
                         raw_results: List[Any]) -> List[ParallelExecutionResult]:
        """
        Convert gather() output into ParallelExecutionResult records.

        Args:
            agent_ids: Agent ids in the same order the coroutines were
                submitted — positionally aligned with raw_results.
            raw_results: Items from asyncio.gather(..., return_exceptions=True):
                either ParallelExecutionResult objects or raised exceptions.

        Returns:
            One ParallelExecutionResult per input item, wrapping any
            exception as a failed record.
        """
        collected: List[ParallelExecutionResult] = []
        for agent_id, outcome in zip(agent_ids, raw_results):
            if isinstance(outcome, Exception):
                agent, _role = self.agents[agent_id]
                collected.append(ParallelExecutionResult(
                    agent_id=agent_id,
                    agent_name=agent.name,
                    result=None,
                    execution_time=0.0,
                    success=False,
                    error_message=str(outcome)
                ))
            else:
                collected.append(outcome)
        return collected

    async def _execute_agent_with_retry(self, agent: BaseAgent, task: Task,
                                      agent_id: str, role: str) -> ParallelExecutionResult:
        """
        Execute one agent with a per-attempt timeout and retries.

        Args:
            agent: Agent to execute.
            task: Task to execute.
            agent_id: Agent identifier (recorded in the result).
            role: Agent role (currently unused; kept for interface parity).

        Returns:
            ParallelExecutionResult; never raises — all failures are
            captured in the record after retries are exhausted.
        """
        start_time = time.time()
        last_error: Optional[str] = None

        for attempt in range(self.retry_attempts + 1):
            try:
                # Semaphore bounds how many agents run concurrently; the
                # timeout applies per attempt, not to the whole retry loop.
                async with self._execution_semaphore:
                    result = await asyncio.wait_for(
                        agent.act(task),
                        timeout=self.timeout
                    )

                return ParallelExecutionResult(
                    agent_id=agent_id,
                    agent_name=agent.name,
                    result=result,
                    execution_time=time.time() - start_time,
                    # Bug fix: the original compared status.value to
                    # "success" only, but the rest of this module treats
                    # TaskStatus.COMPLETED ("completed") as the success
                    # status (see _aggregate_results / get_performance_stats),
                    # so completed agents were reported as failed. Accept
                    # both values for backward compatibility.
                    success=result.status.value in ("completed", "success")
                )

            except asyncio.TimeoutError:
                last_error = f"执行超时 (超时时间: {self.timeout}秒)"
            except Exception as e:
                last_error = str(e)

            # Linear backoff before the next attempt (1s, 2s, 3s, ...).
            if attempt < self.retry_attempts:
                await asyncio.sleep(1 * (attempt + 1))

        # All attempts exhausted — report the last error seen.
        return ParallelExecutionResult(
            agent_id=agent_id,
            agent_name=agent.name,
            result=None,
            execution_time=time.time() - start_time,
            success=False,
            error_message=last_error
        )

    def _aggregate_results(self, execution_results: List[ParallelExecutionResult],
                          task: Task) -> Result:
        """
        Aggregate per-agent results into one coordinator Result.

        Args:
            execution_results: Per-agent execution records.
            task: The original task (for id/title in the payload).

        Returns:
            COMPLETED when at least one agent succeeded (with a partial-
            success message if some failed), FAILED when none did.
        """
        successful_results = [r for r in execution_results if r.success]
        failed_results = [r for r in execution_results if not r.success]

        total_execution_time = sum(r.execution_time for r in execution_results)
        max_execution_time = max(
            (r.execution_time for r in execution_results), default=0
        )

        aggregated_data = {
            "task_id": task.id,
            "task_title": task.title,
            "total_agents": len(execution_results),
            "successful_agents": len(successful_results),
            "failed_agents": len(failed_results),
            "total_execution_time": total_execution_time,
            "max_execution_time": max_execution_time,
            "results": []
        }

        # Successful entries carry the agent's data payload...
        for result in successful_results:
            aggregated_data["results"].append({
                "agent_id": result.agent_id,
                "agent_name": result.agent_name,
                "success": True,
                "execution_time": result.execution_time,
                "data": result.result.data if result.result else None
            })

        # ...failed entries carry the error message instead.
        for result in failed_results:
            aggregated_data["results"].append({
                "agent_id": result.agent_id,
                "agent_name": result.agent_name,
                "success": False,
                "execution_time": result.execution_time,
                "error": result.error_message
            })

        # Final status: all-success and partial-success both map to
        # COMPLETED (partial carries an explanatory message); only a
        # total failure yields FAILED.
        if len(successful_results) == len(execution_results):
            final_status = TaskStatus.COMPLETED
            error_message = None
        elif successful_results:
            final_status = TaskStatus.COMPLETED
            error_message = f"部分执行成功: {len(successful_results)}/{len(execution_results)} 个智能体成功"
        else:
            final_status = TaskStatus.FAILED
            error_message = "所有智能体执行失败"

        return Result(
            status=final_status,
            data=aggregated_data,
            error_message=error_message,
            metadata={
                "coordinator_type": "parallel",
                "coordinator_id": self.coordinator_id,
                "execution_strategy": "parallel",
                "max_workers": self.max_workers,
                "timeout": self.timeout,
                "retry_attempts": self.retry_attempts
            }
        )

    def _record_execution(self, task: Task, execution_results: List[ParallelExecutionResult],
                         final_result: Result, total_time: float) -> None:
        """
        Append one record to the bounded execution history.

        Args:
            task: The executed task.
            execution_results: Per-agent execution records.
            final_result: The aggregated result.
            total_time: Total wall-clock seconds for the whole run.
        """
        execution_record = {
            "timestamp": datetime.now().isoformat(),
            "task_id": task.id,
            "task_title": task.title,
            "total_time": total_time,
            "agent_count": len(execution_results),
            "successful_count": sum(1 for r in execution_results if r.success),
            "final_status": final_result.status.value,
            "execution_results": [
                {
                    "agent_id": r.agent_id,
                    "agent_name": r.agent_name,
                    "success": r.success,
                    "execution_time": r.execution_time,
                    "error": r.error_message
                }
                for r in execution_results
            ]
        }

        self._execution_history.append(execution_record)

        # Keep only the most recent records to bound memory use.
        if len(self._execution_history) > self._HISTORY_LIMIT:
            self._execution_history = self._execution_history[-self._HISTORY_LIMIT:]

    def get_execution_history(self, limit: int = 10) -> List[Dict[str, Any]]:
        """
        Return the most recent execution records.

        Args:
            limit: Maximum number of records to return.

        Returns:
            Up to *limit* most recent records (empty list for limit <= 0).
        """
        # Bug fix: the original returned the ENTIRE history for limit=0,
        # because history[-0:] slices from the start.
        if limit <= 0:
            return []
        return self._execution_history[-limit:]

    def get_performance_stats(self) -> Dict[str, Any]:
        """
        Compute aggregate performance statistics over the history.

        Returns:
            Dict with execution counts, success rate, averages, and the
            coordinator's current tuning parameters.
        """
        if not self._execution_history:
            return {
                "total_executions": 0,
                "success_rate": 0.0,
                "average_execution_time": 0.0,
                "average_agent_count": 0.0
            }

        total_executions = len(self._execution_history)
        successful_executions = sum(
            1 for h in self._execution_history if h["final_status"] == "completed"
        )

        return {
            "total_executions": total_executions,
            "success_rate": successful_executions / total_executions,
            "average_execution_time": (
                sum(h["total_time"] for h in self._execution_history) / total_executions
            ),
            "average_agent_count": (
                sum(h["agent_count"] for h in self._execution_history) / total_executions
            ),
            "max_workers": self.max_workers,
            "timeout": self.timeout,
            "retry_attempts": self.retry_attempts
        }

    def get_status(self) -> Dict[str, Any]:
        """
        Return the coordinator's status, extending the base status with
        parallel-specific configuration and performance statistics.

        Returns:
            Status dict.
        """
        base_status = super().get_status()
        base_status.update({
            "coordinator_type": "parallel",
            "max_workers": self.max_workers,
            "timeout": self.timeout,
            "retry_attempts": self.retry_attempts,
            "active_tasks": len(self._active_tasks),
            "execution_history_count": len(self._execution_history),
            "performance_stats": self.get_performance_stats()
        })

        return base_status

    def set_max_workers(self, max_workers: int) -> None:
        """
        Change the concurrency cap.

        Args:
            max_workers: New maximum number of concurrent agent executions.

        Note:
            Replaces the semaphore, so the new cap only applies to
            executions started after this call; in-flight executions keep
            holding the old semaphore.
        """
        self.max_workers = max_workers
        self._execution_semaphore = asyncio.Semaphore(max_workers)

    def set_timeout(self, timeout: int) -> None:
        """
        Change the per-attempt timeout.

        Args:
            timeout: Timeout in seconds.
        """
        self.timeout = timeout

    def set_retry_attempts(self, retry_attempts: int) -> None:
        """
        Change the retry count.

        Args:
            retry_attempts: Number of retries after the first failed attempt.
        """
        self.retry_attempts = retry_attempts

    def clear_execution_history(self) -> None:
        """Discard all stored execution history records."""
        self._execution_history.clear()

    async def execute_with_priority(self, task: Task, priority_agents: List[str]) -> Result:
        """
        Execute the task in two waves: priority agents first (in
        parallel), then all remaining agents (in parallel).

        Args:
            task: Task to execute.
            priority_agents: Agent ids to run in the first wave.

        Returns:
            Aggregated Result over both waves.
        """
        start_time = time.time()

        # Track ids in the same order their coroutines are submitted so
        # results can be attributed correctly afterwards.
        priority_ids: List[str] = []
        normal_ids: List[str] = []
        priority_coros = []
        normal_coros = []

        for agent_id, (agent, role) in self.agents.items():
            coro = self._execute_agent_with_retry(agent, task, agent_id, role)
            if agent_id in priority_agents:
                priority_ids.append(agent_id)
                priority_coros.append(coro)
            else:
                normal_ids.append(agent_id)
                normal_coros.append(coro)

        # First wave: priority agents.
        priority_raw: List[Any] = []
        if priority_coros:
            priority_raw = list(await asyncio.gather(*priority_coros,
                                                     return_exceptions=True))

        # Second wave: everyone else.
        normal_raw: List[Any] = []
        if normal_coros:
            normal_raw = list(await asyncio.gather(*normal_coros,
                                                   return_exceptions=True))

        # Bug fix: the original zipped the priority-first combined result
        # list against dict insertion order, mis-attributing results to
        # the wrong agents whenever priority agents were not first in
        # registration order.
        execution_results = self._collect_results(
            priority_ids + normal_ids, priority_raw + normal_raw
        )

        final_result = self._aggregate_results(execution_results, task)

        # Bug fix: record the actual elapsed time (was hard-coded 0.0),
        # which also kept performance stats honest.
        self._record_execution(task, execution_results, final_result,
                               time.time() - start_time)

        return final_result

    def __str__(self) -> str:
        return f"ParallelCoordinator(id='{self.coordinator_id}', agents={len(self.agents)}, max_workers={self.max_workers})"

    def __repr__(self) -> str:
        return f"<ParallelCoordinator(id='{self.coordinator_id}', name='{self.name}', agents={len(self.agents)})>"
