"""
工作流池管理模块
提供多工作流实例管理和并发控制
"""

import asyncio
from typing import List, Dict, Any, Optional, Callable, Union, Coroutine, Awaitable, Tuple, TypeVar, DefaultDict
import asyncio
from concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor
from collections import defaultdict, deque
import logging
from dataclasses import dataclass, field
import time
import threading
import json
import heapq
import uuid
from src.utils.metrics import track_performance, WORKFLOW_EXECUTIONS, WORKFLOW_DURATION
from langchain_core.runnables import RunnableConfig
from langchain_core.messages import AnyMessage
from src.research_core.enhanced_multi_agent_workflow import EnhancedMultiAgentState as OptimizedMultiAgentState

logger = logging.getLogger(__name__)

# Type aliases used throughout this module.
StateType = Dict[str, Any]
ResultType = Dict[str, Any]
MessageType = Dict[str, Any]
ImageType = Dict[str, Any]
TableType = Dict[str, Any]
VisualizationType = Dict[str, Any]

# Callback aliases: every callback may be sync or async, and is optional.
ResultCallback = Optional[Union[Callable[[ResultType], None], Callable[[ResultType], Awaitable[None]]]]
ErrorCallback = Optional[Union[Callable[[str], None], Callable[[str], Awaitable[None]]]]
BatchCallback = Optional[Union[Callable[[List[ResultType]], None], Callable[[List[ResultType]], Awaitable[None]]]]

@dataclass
class WorkflowTask:
    """A single unit of work for the workflow pool.

    ``to_dict``/``from_dict`` implement the persisted form; ``callback`` is
    deliberately excluded from serialization because callables cannot be
    round-tripped through JSON.
    """
    question: str
    callback: ResultCallback = None
    metadata: Optional[Dict[str, Any]] = None
    submit_time: Optional[float] = None
    task_id: str = field(default_factory=lambda: str(uuid.uuid4()))
    priority: int = 0  # task priority: 0 is normal, larger values run first
    timeout: int = 300  # timeout in seconds
    workflow_type: str = "optimized"  # workflow type label
    persistent: bool = False  # whether this task should be persisted
    
    def __post_init__(self):
        # Stamp the submission time if the caller did not supply one.
        if self.submit_time is None:
            self.submit_time = time.time()
    
    def to_dict(self) -> Dict[str, Any]:
        """Serialize the task to a plain dict for persistence (callback omitted)."""
        return {
            "question": self.question,
            "metadata": self.metadata,
            "submit_time": self.submit_time,
            "task_id": self.task_id,
            "priority": self.priority,
            "timeout": self.timeout,
            "workflow_type": self.workflow_type,
            "persistent": self.persistent
        }
    
    @classmethod
    def from_dict(cls, data: Dict[str, Any]) -> 'WorkflowTask':
        """Create a task from a persisted dict.

        Unknown keys are ignored so that dicts written by a newer or older
        version of ``to_dict`` can still be loaded (the previous version
        raised ``TypeError`` on any extra key).
        """
        from dataclasses import fields as _dc_fields
        known = {f.name for f in _dc_fields(cls)}
        return cls(**{k: v for k, v in data.items() if k in known})

class PriorityTaskQueue:
    """Thread-safe task queue ordered by descending task priority.

    Ties are broken by submission time (earlier first) and then by task id,
    which also keeps heap entries totally ordered without comparing tasks.
    """
    
    def __init__(self):
        self._queue = []
        self._lock = threading.Lock()
        
    def put(self, task: WorkflowTask):
        """Insert a task; priority is negated because heapq pops the minimum."""
        entry = (-task.priority, task.submit_time, task.task_id, task)
        with self._lock:
            heapq.heappush(self._queue, entry)
    
    def get(self) -> WorkflowTask:
        """Pop and return the highest-priority task.

        Raises:
            asyncio.QueueEmpty: when the queue holds no tasks.
        """
        with self._lock:
            if self._queue:
                return heapq.heappop(self._queue)[-1]
        raise asyncio.QueueEmpty()
    
    def empty(self) -> bool:
        """Return True when no tasks are queued."""
        with self._lock:
            return not self._queue
    
    def qsize(self) -> int:
        """Return the number of queued tasks."""
        with self._lock:
            return len(self._queue)

class SmartTaskQueue(PriorityTaskQueue):
    """Priority task queue with type grouping, aging, and batch retrieval.

    Fix over the previous version: entries in ``task_groups`` are now
    removed when a task is dequeued, so the grouping index no longer grows
    without bound.
    """
    
    def __init__(self, max_size: int = 1000):
        super().__init__()
        self.max_size = max_size
        self.task_groups: DefaultDict[str, List[str]] = defaultdict(list)  # task ids grouped by question type
        self.waiting_times: Dict[str, float] = {}  # enqueue timestamp per task id
        self.processing_times: Dict[str, float] = {}  # NOTE(review): never written by this class — confirm still needed
        self._group_by_task: Dict[str, str] = {}  # task id -> question type, for cleanup in get()
        
    def put(self, task: WorkflowTask):
        """Enqueue a task, grouping it by question type and aging its priority.

        NOTE(review): the aging boost below only fires when a task is queued
        long after its creation — on the normal submit path ``submit_time``
        is set moments before ``put``, so ``wait_time`` is ~0.
        """
        # Group by coarse question type.
        question_type = self._classify_question(task.question)
        self.task_groups[question_type].append(task.task_id or "")
        if task.task_id:
            self._group_by_task[task.task_id] = question_type
        
        # Aging: the longer a task has waited since submission, the higher
        # its priority (capped at 10).
        current_time = time.time()
        wait_time = current_time - (task.submit_time or current_time)
        if wait_time > 60:
            task.priority = min(task.priority + int(wait_time / 60), 10)
            
        # Delegate the actual heap insert to the parent queue.
        super().put(task)
        
        # Remember when the task entered the queue.
        if task.task_id:
            self.waiting_times[task.task_id] = current_time
        
        # Soft size limit: warn only, never drop tasks.
        if self.qsize() > self.max_size:
            logger.warning("任务队列已满，可能需要增加处理能力")
    
    def _classify_question(self, question: str) -> str:
        """Coarsely classify a question by keyword matching."""
        question = question.lower()
        if any(keyword in question for keyword in ['how', 'what', 'why', 'explain']):
            return 'explanation'
        elif any(keyword in question for keyword in ['compare', 'difference', 'vs']):
            return 'comparison'
        elif any(keyword in question for keyword in ['step', 'process', 'procedure']):
            return 'procedure'
        else:
            return 'general'
    
    def get(self) -> WorkflowTask:
        """Dequeue the highest-priority task and drop its bookkeeping entries."""
        task = super().get()
        if task.task_id:
            self.waiting_times.pop(task.task_id, None)
            # Remove the grouping entry so task_groups does not leak.
            group = self._group_by_task.pop(task.task_id, None)
            if group is not None:
                try:
                    self.task_groups[group].remove(task.task_id)
                except ValueError:
                    pass
        return task
    
    def get_batch(self, batch_size: int = 5) -> List[WorkflowTask]:
        """Dequeue up to ``batch_size`` tasks, stopping early when empty."""
        tasks = []
        try:
            for _ in range(batch_size):
                tasks.append(self.get())
        except asyncio.QueueEmpty:
            pass
        return tasks

class BatchWorkflowProcessor:
    """Batches similar tasks so one workflow run can serve several of them.

    Fix over the previous version: the per-run ``config`` (thread id,
    recursion limit) is now actually passed to ``workflow.invoke`` in
    ``_process_similar_tasks`` — it was built but silently dropped.
    """
    
    def __init__(self, workflow_pool):
        # Back-reference to the owning pool; used for workflow selection and
        # for delegating single-task execution.
        self.workflow_pool = workflow_pool
        self.batch_size = 5
        # NOTE(review): similarity_threshold is currently unused — grouping
        # below is keyword-type based only. Confirm whether a real similarity
        # metric is still planned.
        self.similarity_threshold = 0.8
        
    async def process_batch(self, tasks: List[WorkflowTask]):
        """Process a list of tasks, sharing one workflow run across similar ones."""
        if len(tasks) == 1:
            # A single task goes through the regular path.
            await self._process_single_task(tasks[0])
            return
            
        # Partition into groups of similar tasks.
        similar_groups = self._group_similar_tasks(tasks)
        
        # Groups with more than one member share a run; singletons run alone.
        for group in similar_groups:
            if len(group) > 1:
                await self._process_similar_tasks(group)
            else:
                await self._process_single_task(group[0])
    
    def _group_similar_tasks(self, tasks: List[WorkflowTask]) -> List[List[WorkflowTask]]:
        """Group tasks by coarse question type (simple similarity proxy)."""
        groups: DefaultDict[str, List[WorkflowTask]] = defaultdict(list)
        for task in tasks:
            question_type = self._classify_question_simple(task.question)
            groups[question_type].append(task)
            
        return list(groups.values())
    
    def _classify_question_simple(self, question: str) -> str:
        """Keyword-based question classification."""
        question = question.lower()
        if 'what' in question or 'define' in question:
            return 'definition'
        elif 'how' in question or 'steps' in question:
            return 'procedure'
        elif 'compare' in question or 'difference' in question:
            return 'comparison'
        else:
            return 'general'
    
    async def _process_similar_tasks(self, tasks: List[WorkflowTask]):
        """Run the workflow once for a group and fan the result out to all tasks."""
        # The first task stands in for the whole group.
        representative_task = tasks[0]
        
        try:
            # Let the pool pick the best workflow for the representative.
            workflow = self.workflow_pool._select_optimal_workflow(
                representative_task.question, 
                representative_task.metadata
            )
            
            config: RunnableConfig = {
                "configurable": {"thread_id": representative_task.task_id or str(uuid.uuid4())},
                "recursion_limit": 50
            }
            
            # Initial workflow state. Optional list/dict fields are
            # initialized to empty containers rather than None.
            state: Dict[str, Any] = {
                "question": representative_task.question,
                "search_query": "",
                "search_results": "",
                "research_complete": False,
                "final_answer": "",
                "images": [],
                "tables": [],
                "multimodal_content": {},
                "search_strategy": "",
                "analysis_results": {},
                "agent_assignments": {},
                "iteration_count": 0,
                "previous_states": [],
                "cache_key": None,
                "reflection": None,
                "visualizations": {},
                "content_quality_score": 0.5,
                "decision_history": [],
                "recursion_depth": 0
            }
            
            # Pass config so the thread id and recursion limit take effect
            # (previously built but never used).
            result = workflow.invoke(state, config)
            
            # Deliver the (optionally customized) result to every task.
            for task in tasks:
                try:
                    if task.callback:
                        customized_result = self._customize_result(result, task.question)
                        if asyncio.iscoroutinefunction(task.callback):
                            await task.callback(customized_result)
                        else:
                            task.callback(customized_result)
                except Exception as e:
                    logger.error(f"执行任务 {task.task_id} 的回调时出错: {e}")
                    
        except Exception as e:
            logger.error(f"批量处理任务时出错: {e}")
            # Batch path failed — fall back to processing each task alone.
            for task in tasks:
                await self._process_single_task(task)
    
    async def _process_single_task(self, task: WorkflowTask):
        """Process one task by delegating to the pool's single-task worker."""
        await self.workflow_pool._worker_task(task)
        
    def _customize_result(self, base_result: Dict[str, Any], question: str) -> Dict[str, Any]:
        """Tailor a shared result for one specific question (shallow copy + tag)."""
        customized_result = base_result.copy()
        customized_result["customized_for"] = question
        return customized_result

class WorkflowPool:
    """工作流池管理器"""
    
    def __init__(self, max_workers: int = 10, workflow_factory=None, max_queue_size: int = 1000):
        """
        Initialize the workflow pool.
        
        Args:
            max_workers: number of worker coroutines started by ``start``
            workflow_factory: zero-argument callable producing a workflow
                instance; defaults to the enhanced multi-agent workflow
            max_queue_size: soft cap on the task queue (exceeding it only warns)
        """
        self.max_workers = max_workers
        self.workflow_factory = workflow_factory or self._default_workflow_factory
        self.executor = ThreadPoolExecutor(max_workers=max_workers)
        # Pre-build one workflow instance per worker.
        self.workflows = [self.workflow_factory() for _ in range(max_workers)]
        self.task_queue = SmartTaskQueue(max_queue_size)  # smart priority queue
        self.batch_processor = BatchWorkflowProcessor(self)  # batch processor
        self.running = False
        self.workers: List[asyncio.Task] = []
        self.stats = {
            "tasks_submitted": 0,
            "tasks_completed": 0,
            "tasks_failed": 0,
            "total_processing_time": 0.0,
            "average_queue_wait_time": 0.0,
            "total_queue_wait_time": 0.0,
            "tasks_by_priority": defaultdict(int),  # task counts per priority
            "batch_processed": 0  # batch-processing counter
        }
        self.queue_wait_times = deque(maxlen=1000)  # wait times of the 1000 most recent tasks
        # Execution times per workflow type. (The previous version assigned
        # this attribute twice in a row; the duplicate line was removed.)
        self.execution_times = defaultdict(lambda: deque(maxlen=100))
        self._worker_tasks = set()  # handles of worker tasks
        self._lock = threading.Lock()
        # Dynamic scaling parameters.
        self.dynamic_scaling = True  # enable dynamic scale up/down
        self.min_workers = max(1, max_workers // 2)  # lower bound on worker count
        self.max_workers_limit = max_workers * 2  # upper bound on worker count
        self.current_workers = max_workers  # current worker count
        # Health-check state.
        self.worker_health = {}  # per-worker health flags
        self.worker_last_heartbeat = {}  # per-worker last heartbeat timestamp
        self.heartbeat_interval = 30  # heartbeat interval in seconds
        # Persistence settings.
        self.persistence_enabled = True  # enable task persistence
        self.persistence_file = "workflow_tasks.json"  # persistence file path
        
    def _default_workflow_factory(self):
        """Build the default (enhanced multi-agent) workflow instance."""
        from src.research_core.enhanced_multi_agent_workflow import (
            create_enhanced_multi_agent_workflow as _build_workflow,
        )
        return _build_workflow()
    
    def _select_optimal_workflow(self, question: str, metadata: Optional[Dict[str, Any]] = None):
        """Pick a workflow for the given question.

        Order of preference: the reinforcement-learning service's
        recommendation, an explicit ``metadata["workflow_type"]``, a
        length-based heuristic (short question -> single-agent, medium ->
        multi-agent), otherwise the default optimized workflow.
        """
        # Prefer the RL service's recommendation when it is available.
        try:
            from src.containers import get_reinforcement_learning_service
            rl_service = get_reinforcement_learning_service()
            if rl_service:
                optimal_workflow = rl_service.select_workflow(question, metadata)
                if optimal_workflow:
                    return optimal_workflow
        except Exception:
            pass  # RL service unavailable — fall through to the static strategy
            
        # Choose from the registry based on metadata and question traits.
        from src.research_core.workflow_registry import get_workflow_registry
        registry = get_workflow_registry()
        
        if metadata and "workflow_type" in metadata:
            workflow_type = metadata["workflow_type"]
            if registry.workflow_exists(workflow_type):
                try:
                    return registry.get_workflow(workflow_type)
                except Exception as e:
                    logger.warning(f"指定的工作流类型 {workflow_type} 无法创建，使用默认工作流: {e}")
        
        # Heuristic: route by question length as a proxy for complexity.
        question_length = len(question)
        if question_length < 50:  # short questions -> single-agent workflow
            if registry.workflow_exists("single-agent"):
                try:
                    return registry.get_workflow("single-agent")
                except Exception:
                    pass  # single-agent unavailable — fall back to the default
            else:
                # Fall back to the research_assistant service when registered.
                from src.services.service_registry import service_registry
                if service_registry.service_exists("research_assistant"):
                    # NOTE(review): this returns the *string* "research_assistant",
                    # not a workflow object — callers invoke `.invoke(...)` on the
                    # return value, which would fail here. Confirm intended contract.
                    return "research_assistant"
        elif question_length < 200:  # medium questions -> standard multi-agent workflow
            if registry.workflow_exists("multi-agent"):
                try:
                    return registry.get_workflow("multi-agent")
                except Exception:
                    pass  # multi-agent unavailable — fall back to the default
        
        # Default: the optimized enhanced multi-agent workflow.
        return self._default_workflow_factory()
    
    async def start(self):
        """Start the pool: spawn worker tasks and recover persisted tasks."""
        if self.running:
            logger.warning("工作流池已经在运行中")
            return self.workers
        
        self.running = True
        self.workers = [
            asyncio.create_task(self._worker(idx))
            for idx in range(self.max_workers)
        ]
        
        # Re-enqueue any tasks persisted by a previous run.
        await self.recover_tasks()
        
        logger.info(f"工作流池已启动，{self.max_workers} 个工作线程")
        return self.workers
    
    async def stop(self):
        """Stop the pool: signal workers to exit, wait for them, close executor."""
        if not self.running:
            return
            
        self.running = False
        
        # Workers leave their loop once `running` is False; wait for all of them.
        pending = self.workers
        if pending:
            await asyncio.gather(*pending, return_exceptions=True)
        
        # Release the thread-pool executor without blocking on its queue.
        self.executor.shutdown(wait=False)
        
        logger.info("工作流池已停止")
    
    async def _worker(self, worker_id: int):
        """Worker loop: pull tasks from the priority queue and execute them.

        Fixes over the previous version:
          * the per-task ``config`` (thread id, recursion limit) is now
            actually passed to ``workflow.invoke`` — it was built but unused;
          * removed the dead ``from typing import cast`` import;
          * uses ``asyncio.get_running_loop()`` instead of the deprecated
            ``get_event_loop()`` inside a coroutine.

        Args:
            worker_id: index identifying this worker in health bookkeeping.
        """
        logger.info(f"工作线程 {worker_id} 已启动")
        
        # Register as healthy and stamp the first heartbeat.
        with self._lock:
            self.worker_health[worker_id] = True
            self.worker_last_heartbeat[worker_id] = time.time()
        
        try:
            while self.running:
                try:
                    # Refresh the heartbeat on every loop iteration.
                    with self._lock:
                        self.worker_last_heartbeat[worker_id] = time.time()
                    
                    # Pull the highest-priority task (raises QueueEmpty when idle).
                    task = self.task_queue.get()
                    
                    # Record how long the task sat in the queue.
                    submit_time: float = task.submit_time if task.submit_time is not None else time.time()
                    queue_wait_time = time.time() - submit_time
                    self.queue_wait_times.append(queue_wait_time)
                    self.stats["total_queue_wait_time"] += queue_wait_time
                    
                    # Per-priority task accounting.
                    self.stats["tasks_by_priority"][task.priority] += 1
                    
                    start_time = time.time()
                    
                    try:
                        # Select the best workflow instance for this task.
                        workflow = self._select_optimal_workflow(task.question, task.metadata)
                        
                        config: RunnableConfig = {
                            "configurable": {"thread_id": task.task_id},
                            "recursion_limit": 50  # high limit so complex tasks are not cut short
                        }
                        
                        # Initial workflow state (OptimizedMultiAgentState is a
                        # TypedDict, so a plain dict literal is the right form).
                        # Optional list/dict fields start as empty containers,
                        # not None.
                        state = {
                            "question": task.question,
                            "search_query": "",
                            "search_results": "",
                            "research_complete": False,
                            "final_answer": "",
                            "images": [],
                            "tables": [],
                            "multimodal_content": {},
                            "search_strategy": "",
                            "analysis_results": {},
                            "agent_assignments": {},
                            "iteration_count": 0,
                            "previous_states": [],
                            "cache_key": None,
                            "reflection": None,
                            "visualizations": {},
                            "content_quality_score": 0.5,
                            "decision_history": [],
                            "recursion_depth": 0
                        }
                        
                        # Execute the workflow, with timeout control when configured.
                        try:
                            if task.timeout:
                                def _invoke_workflow():
                                    # Pass config so thread_id / recursion_limit apply.
                                    return workflow.invoke(state, config)  # type: ignore
                                    
                                result = await asyncio.wait_for(
                                    asyncio.get_running_loop().run_in_executor(
                                        None,
                                        _invoke_workflow
                                    ),
                                    timeout=task.timeout
                                )
                            else:
                                result = workflow.invoke(state, config)  # type: ignore
                            
                            # Deliver the result (sync or async callback).
                            if task.callback:
                                if asyncio.iscoroutinefunction(task.callback):
                                    await task.callback(result)  # type: ignore
                                else:
                                    task.callback(result)  # type: ignore
                            
                            # Success accounting.
                            execution_time = time.time() - start_time
                            with self._lock:
                                self.stats["tasks_completed"] += 1
                                self.stats["total_processing_time"] += execution_time
                                self.execution_times[task.workflow_type].append(execution_time)
                                
                        except asyncio.TimeoutError:
                            logger.warning(f"任务 {task.task_id} 超时")
                            with self._lock:
                                self.stats["tasks_failed"] += 1
                            if task.callback:
                                await self._call_error_callback(task, "Task timeout")
                        except Exception as e:
                            logger.error(f"任务 {task.task_id} 执行失败: {e}")
                            with self._lock:
                                self.stats["tasks_failed"] += 1
                            if task.callback:
                                await self._call_error_callback(task, str(e))
                                
                    except Exception as e:
                        # Failure before/around workflow execution (e.g. selection).
                        logger.error(f"工作线程 {worker_id} 处理任务 {task.task_id} 时出错: {e}")
                        with self._lock:
                            self.stats["tasks_failed"] += 1
                        if task.callback:
                            await self._call_error_callback(task, str(e))
                            
                except asyncio.QueueEmpty:
                    # Nothing queued — yield briefly instead of busy-spinning.
                    await asyncio.sleep(0.01)
                except Exception as e:
                    logger.error(f"工作线程 {worker_id} 发生错误: {e}")
                    # Mark this worker unhealthy for the pool health check.
                    with self._lock:
                        self.worker_health[worker_id] = False
                        
        finally:
            logger.info(f"工作线程 {worker_id} 已停止")
    
    async def _call_error_callback(self, task: WorkflowTask, error: str):
        """Deliver an error payload to the task's callback, sync or async."""
        if not task.callback:
            return
        payload = {"error": error, "task_id": task.task_id}
        try:
            if asyncio.iscoroutinefunction(task.callback):
                await task.callback(payload)  # type: ignore
            else:
                task.callback(payload)  # type: ignore
        except Exception as e:
            logger.error(f"调用错误回调失败: {e}")
    
    async def submit(self, question: str, 
                    callback: Optional[Union[Callable[[Any], None], Callable[[Any], Awaitable[None]]]] = None,
                    metadata: Optional[Dict[str, Any]] = None) -> str:
        """
        Submit one research question to the pool.
        
        Args:
            question: the research question
            callback: sync or async callable invoked with the result
            metadata: optional task options (priority, timeout,
                workflow_type, persistent)
            
        Returns:
            str: the generated task id

        Raises:
            RuntimeError: if the pool is not running
        """
        if not self.running:
            raise RuntimeError("工作流池未运行")
            
        # Pull task options from the metadata, falling back to defaults.
        options = metadata or {}
        task = WorkflowTask(
            question=question,
            callback=callback,
            metadata=metadata,
            priority=options.get("priority", 0),
            timeout=options.get("timeout", 300),
            workflow_type=options.get("workflow_type", "optimized"),
            persistent=options.get("persistent", False)
        )
        
        # Record queued state when persistence was requested.
        if task.persistent and self.persistence_enabled:
            await self.persist_task_state(task.task_id, {"status": "queued", "question": question})
        
        # Hand the task to the priority queue.
        self.task_queue.put(task)
        
        with self._lock:
            self.stats["tasks_submitted"] += 1
            
        logger.info(f"任务 {task.task_id} 已提交: {question[:50]}...")
        return task.task_id

    async def submit_batch(self, questions: List[str], 
                          callback: BatchCallback = None,
                          metadata_list: Optional[List[Dict[str, Any]]] = None) -> List[str]:
        """
        Submit several questions at once.

        The optional ``callback`` fires exactly once — with results ordered
        to match ``questions`` — after every task completes. (The verbatim
        duplicated docstring of the previous version was removed.)
        
        Args:
            questions: list of research questions
            callback: batch callback (sync or async) receiving all results
            metadata_list: optional per-question metadata, aligned by index
            
        Returns:
            List[str]: task ids, one per question

        Raises:
            RuntimeError: if the pool is not running
        """
        if not self.running:
            raise RuntimeError("工作流池未运行")
        
        task_ids = []
        collected = []  # (index, result) pairs gathered by the per-task callbacks
        
        for i, question in enumerate(questions):
            meta = metadata_list[i] if metadata_list and i < len(metadata_list) else None
            task_callback = None
            
            if callback:
                # Per-task callback that records its result and, once all
                # results are in, fires the batch callback in input order.
                # `index=i` binds the loop variable eagerly (avoids the
                # late-binding closure pitfall).
                async def single_callback(result, index=i):
                    collected.append((index, result))
                    if len(collected) == len(questions):
                        # Re-order results to match the input question order.
                        sorted_results = [None] * len(questions)
                        for idx, res in collected:
                            sorted_results[idx] = res
                        if asyncio.iscoroutinefunction(callback):
                            await callback(sorted_results)  # type: ignore
                        else:
                            callback(sorted_results)  # type: ignore
                
                task_callback = single_callback
            
            task_id = await self.submit(question, task_callback, meta)
            task_ids.append(task_id)
        
        return task_ids

    async def submit_with_persistence(self, question: str,
                                     callback: Optional[Union[Callable[[Any], None], Callable[[Any], Awaitable[None]]]] = None,
                                     metadata: Optional[Dict[str, Any]] = None,
                                     persist_state: bool = False) -> str:
        """
        Submit a task, optionally flagging it for persistence.

        Fixes over the previous version: the verbatim duplicated docstring
        was removed, and ``persist_state=True`` now also sets
        ``metadata["persistent"]`` — previously only the key
        ``"persist_state"`` was set, which ``submit`` never reads, so the
        task was never actually persisted.
        
        Args:
            question: the research question
            callback: result callback (sync or async)
            metadata: additional metadata
            persist_state: whether to persist the task's state
            
        Returns:
            str: the task id
        """
        if metadata is None:
            metadata = {}
        # Keep the legacy key for any external readers of the metadata.
        metadata["persist_state"] = persist_state
        # Set the key `submit` actually consumes; only on True so an
        # explicit caller-provided `persistent` flag is never downgraded.
        if persist_state:
            metadata["persistent"] = True
        
        # Delegate to the standard submit path.
        return await self.submit(question, callback, metadata)
    
    def get_stats(self) -> Dict[str, Any]:
        """Return a snapshot of pool statistics with derived averages."""
        with self._lock:
            # Mean queue wait over the recent sample window.
            wait_samples = self.queue_wait_times
            avg_queue_wait = sum(wait_samples) / len(wait_samples) if wait_samples else 0
            
            # Mean processing time (guarded against zero completions).
            completed = self.stats["tasks_completed"]
            avg_processing_time = self.stats["total_processing_time"] / max(1, completed)
            
            # Mean execution time per workflow type.
            workflow_avg_times = {
                wf_type: sum(samples) / len(samples)
                for wf_type, samples in self.execution_times.items()
                if samples
            }
            
            # Success rate over all finished (completed + failed) tasks.
            finished = completed + self.stats["tasks_failed"]
            success_rate = (completed / finished * 100) if finished > 0 else 0
            
            return {
                "running": self.running,
                "workers": self.max_workers,
                "queue_size": self.task_queue.qsize(),
                "stats": self.stats.copy(),
                "average_queue_wait_time": avg_queue_wait,
                "average_processing_time": avg_processing_time,
                "success_rate": success_rate,
                "workflow_average_times": workflow_avg_times
            }
    
    def _is_healthy(self) -> bool:
        """Heuristic health check for the whole pool.

        Unhealthy when: the pool is stopped; fewer than half the workers
        report healthy; any heartbeat is older than two intervals; the
        failure rate exceeds 50%; or the average processing time exceeds
        five minutes.
        """
        if not self.running:
            return False
            
        with self._lock:
            # Worker health ratio.
            total_workers = len(self.worker_health)
            if total_workers > 0:
                healthy_workers = sum(1 for ok in self.worker_health.values() if ok)
                if healthy_workers / total_workers < 0.5:
                    return False
                
            # Heartbeat freshness (two intervals of grace).
            now = time.time()
            stale_cutoff = self.heartbeat_interval * 2
            for last_heartbeat in self.worker_last_heartbeat.values():
                if now - last_heartbeat > stale_cutoff:
                    return False
                    
            # Failure rate across finished tasks.
            completed = self.stats["tasks_completed"]
            failed = self.stats["tasks_failed"]
            finished = completed + failed
            if finished > 0 and failed / finished > 0.5:
                return False
                    
            # Average processing time bound (5 minutes).
            if completed > 0:
                if self.stats["total_processing_time"] / completed > 300:
                    return False
        
        return True
    
    def _check_and_scale(self):
        """Grow or shrink the worker set based on current queue pressure."""
        if not self.dynamic_scaling:
            return
            
        with self._lock:
            backlog = self.task_queue.qsize()
            can_grow = self.current_workers < self.max_workers_limit
            can_shrink = self.current_workers > self.min_workers
            # Heavy backlog (over 2x workers) and headroom left -> scale up.
            if backlog > self.current_workers * 2 and can_grow:
                self._scale_up()
            # Nearly idle (under half the worker count) and above the floor -> scale down.
            elif backlog < self.current_workers // 2 and can_shrink:
                self._scale_down()
    
    def _scale_up(self):
        """Add up to two workers, bounded by ``max_workers_limit``."""
        headroom = self.max_workers_limit - self.current_workers
        if headroom <= 0:
            return
            
        additional_workers = min(2, headroom)
        logger.info(f"工作流池扩容: 增加 {additional_workers} 个工作线程")
        
        # Create one workflow instance and one worker task per new slot.
        for offset in range(additional_workers):
            self.workflows.append(self.workflow_factory())
            self.workers.append(
                asyncio.create_task(self._worker(self.current_workers + offset))
            )
            
        self.current_workers += additional_workers
    
    def _scale_down(self):
        """Cancel up to two workers, bounded by ``min_workers``.

        NOTE(review): cancelling a worker task mid-execution may drop the
        task it is currently processing — confirm this is acceptable.
        """
        slack = self.current_workers - self.min_workers
        if slack <= 0:
            return
            
        reduce_workers = min(2, slack)
        logger.info(f"工作流池缩容: 减少 {reduce_workers} 个工作线程")
        
        # Cancel the most recently started workers.
        for _ in range(reduce_workers):
            if self.workers:
                self.workers.pop().cancel()
                
        # Shrink the workflow-instance list to match the new worker count.
        self.workflows = self.workflows[:len(self.workflows) - reduce_workers]
        self.current_workers -= reduce_workers
    
    async def persist_task_state(self, task_id: str, state: Dict[str, Any]):
        """Record a task's state in the JSON persistence file.

        NOTE(review): performs blocking file I/O inside a coroutine and the
        read-modify-write is not safe against concurrent writers; acceptable
        for the current single-event-loop usage but worth confirming.
        """
        if not self.persistence_enabled:
            return
            
        try:
            # Load the current snapshot (missing file means empty snapshot).
            try:
                with open(self.persistence_file, 'r', encoding='utf-8') as f:
                    snapshot = json.load(f)
            except FileNotFoundError:
                snapshot = {}
            
            # Record/overwrite this task's state with a timestamp.
            snapshot[task_id] = {
                "state": state,
                "timestamp": time.time()
            }
            
            # Write the updated snapshot back.
            with open(self.persistence_file, 'w', encoding='utf-8') as f:
                json.dump(snapshot, f, ensure_ascii=False, indent=2)
                
        except Exception as e:
            logger.error(f"持久化任务 {task_id} 状态失败: {e}")
    
    async def recover_tasks(self):
        """Recover persisted, unexpired tasks by re-submitting them.

        Fix over the previous version: recovery previously only *logged*
        "recovering" without re-queueing anything, then wiped the
        persistence file — silently losing every persisted task. Entries
        younger than 24 hours whose persisted state carries a question are
        now re-submitted through ``submit``.
        """
        if not self.persistence_enabled:
            return
            
        try:
            # Load the persisted snapshot; nothing to do if the file is absent.
            try:
                with open(self.persistence_file, 'r', encoding='utf-8') as f:
                    persistent_data = json.load(f)
            except FileNotFoundError:
                return
            
            current_time = time.time()
            recovered_tasks = 0
            
            for task_id, data in persistent_data.items():
                # Entries older than 24 hours are considered expired.
                if current_time - data.get("timestamp", 0) < 24 * 3600:
                    logger.info(f"恢复任务 {task_id}")
                    question = (data.get("state") or {}).get("question")
                    if question:
                        # Re-submit without the persistent flag so the task
                        # is not immediately re-written to the file.
                        await self.submit(question)
                    recovered_tasks += 1
                else:
                    logger.info(f"跳过过期任务 {task_id}")
            
            if recovered_tasks > 0:
                logger.info(f"成功恢复 {recovered_tasks} 个任务")
                
            # Clear the file now that recoverable tasks are back in the queue.
            with open(self.persistence_file, 'w', encoding='utf-8') as f:
                json.dump({}, f)
                
        except Exception as e:
            logger.error(f"恢复任务失败: {e}")

    async def _worker_task(self, task: WorkflowTask):
        """Execute a single workflow task end to end.

        Selects a workflow instance for the question, builds the initial
        TypedDict state, runs the workflow (with a timeout in an executor
        when ``task.timeout`` is set), then fires the task callback and
        updates statistics and persisted task status.

        Args:
            task: The queued task to process.
        """
        start_time = time.time()

        async def _record_failure(error_message: str):
            # Shared failure bookkeeping: stats counter, error callback,
            # and persisted "failed" status (previously triplicated).
            with self._lock:
                self.stats["tasks_failed"] += 1
            if task.callback:
                await self._call_error_callback(task, error_message)
            if task.persistent and self.persistence_enabled:
                await self.persist_task_state(
                    task.task_id, {"status": "failed", "error": error_message}
                )

        try:
            # Pick the workflow instance best suited to this question.
            workflow = self._select_optimal_workflow(task.question, task.metadata)

            # Per-task run configuration: checkpointing thread id plus a
            # raised recursion limit so complex tasks are not cut short.
            config: RunnableConfig = {
                "configurable": {"thread_id": task.task_id},
                "recursion_limit": 50
            }

            # OptimizedMultiAgentState extends TypedDict, so the initial
            # state is a plain dict; optional collections start as empty
            # containers rather than None.
            state = {
                "question": task.question,
                "search_query": "",
                "search_results": "",
                "research_complete": False,
                "final_answer": "",
                "images": [],
                "tables": [],
                "multimodal_content": {},
                "search_strategy": "",
                "analysis_results": {},
                "agent_assignments": {},
                "iteration_count": 0,
                "previous_states": [],
                "cache_key": None,
                "reflection": None,
                "visualizations": {},
                "content_quality_score": 0.5,
                "decision_history": [],
                "recursion_depth": 0
            }

            try:
                # Fix: the original built `config` but never passed it to
                # invoke, so thread_id/recursion_limit were dead settings.
                if task.timeout:
                    # Run the blocking invoke in the default executor so
                    # the event loop stays responsive, bounded by timeout.
                    # get_running_loop() replaces the deprecated
                    # get_event_loop() inside a coroutine.
                    loop = asyncio.get_running_loop()
                    result = await asyncio.wait_for(
                        loop.run_in_executor(
                            None,
                            lambda: workflow.invoke(state, config=config)  # type: ignore
                        ),
                        timeout=task.timeout
                    )
                else:
                    result = workflow.invoke(state, config=config)  # type: ignore

                # Fire the success callback (sync or async).
                if task.callback:
                    if asyncio.iscoroutinefunction(task.callback):
                        await task.callback(result)  # type: ignore
                    else:
                        task.callback(result)  # type: ignore

                # Update success statistics under the pool lock.
                execution_time = time.time() - start_time
                with self._lock:
                    self.stats["tasks_completed"] += 1
                    self.stats["total_processing_time"] += execution_time
                    self.execution_times[task.workflow_type].append(execution_time)

                # Persist completion when the task opted in.
                if task.persistent and self.persistence_enabled:
                    await self.persist_task_state(task.task_id, {"status": "completed", "result": result})

            except asyncio.TimeoutError:
                logger.warning(f"任务 {task.task_id} 超时")
                await _record_failure("Task timeout")
            except Exception as e:
                logger.error(f"任务 {task.task_id} 执行失败: {e}")
                await _record_failure(str(e))

        except Exception as e:
            logger.error(f"处理任务 {task.task_id} 时出错: {e}")
            await _record_failure(str(e))

class LoadBalancer:
    """Distributes research tasks across several WorkflowPool instances.

    Runs a periodic health sweep over its pools and routes each task to
    the healthy pool with the shortest queue, falling back to a simple
    rotating index when no pool reports healthy.
    """

    def __init__(self, pool_size: int = 3, max_workers_per_pool: int = 5):
        """Create the balancer and its backing pools.

        Args:
            pool_size: Number of workflow pools to manage.
            max_workers_per_pool: Worker cap applied to each pool.
        """
        self.pools = [WorkflowPool(max_workers=max_workers_per_pool) for _ in range(pool_size)]
        self.current_pool = 0            # rotation cursor / fallback index
        self.health_checks = {}          # pool index -> last probed health status
        self.last_health_check = 0       # timestamp of the last health sweep
        self.health_check_interval = 30  # seconds between sweeps

    async def submit(self, question: str, callback: Optional[Union[Callable[[Any], None], Callable[[Any], Awaitable[None]]]] = None,
                    metadata: Optional[Dict[str, Any]] = None) -> None:
        """Submit a task to the best available pool.

        Args:
            question: The research question to process.
            callback: Sync or async callable invoked with the result.
            metadata: Extra metadata forwarded with the task.
        """
        now = time.time()

        # Refresh cached health state once the interval has elapsed.
        if now - self.last_health_check > self.health_check_interval:
            await self._perform_health_check()
            self.last_health_check = now

        # Probe all pools and keep the indices of the healthy ones.
        healthy = [idx for idx in range(len(self.pools)) if self._is_healthy(idx)]

        if healthy:
            # Load-aware choice among the healthy pools.
            target = self._select_best_pool(healthy)
        else:
            # Degraded mode: fall back to the rotating cursor.
            target = self.current_pool
            logger.warning("没有健康的工作池可用，使用默认工作池")

        chosen = self.pools[target]
        self.current_pool = (self.current_pool + 1) % len(self.pools)
        await chosen.submit(question, callback, metadata)

    def _is_healthy(self, pool_index: int) -> bool:
        """Return the live health status of the pool at *pool_index*."""
        return self.pools[pool_index]._is_healthy()

    def _select_best_pool(self, healthy_pools: List[int]) -> int:
        """Pick the healthy pool with the shortest task queue.

        Falls back to the rotation cursor when *healthy_pools* is empty.
        """
        if not healthy_pools:
            return self.current_pool
        return min(healthy_pools, key=lambda idx: self.pools[idx].task_queue.qsize())

    async def _perform_health_check(self):
        """Probe every pool, cache the result, and log failures."""
        for i, pool in enumerate(self.pools):
            healthy = pool._is_healthy()
            self.health_checks[i] = healthy
            if not healthy:
                logger.warning(f"工作池 {i} 健康检查失败")

    async def start_all(self):
        """Start every managed pool."""
        for pool in self.pools:
            await pool.start()

    async def stop_all(self):
        """Stop every managed pool."""
        for pool in self.pools:
            await pool.stop()

    def get_stats(self) -> List[Dict[str, Any]]:
        """Collect per-pool statistics as a list, one entry per pool."""
        return [pool.get_stats() for pool in self.pools]

# Module-level workflow pool shared by the convenience functions below.
global_workflow_pool = WorkflowPool(max_workers=10)

@track_performance("workflow_pool")
async def submit_research_task(question: str, callback: Optional[Union[Callable[[Any], None], Callable[[Any], Awaitable[None]]]] = None,
                              metadata: Optional[Dict[str, Any]] = None) -> None:
    """Submit a research task via the module-level workflow pool.

    Args:
        question: The research question to process.
        callback: Sync or async callable invoked with the result.
        metadata: Extra metadata forwarded with the task.
    """
    pool = global_workflow_pool
    await pool.submit(question, callback, metadata)

@track_performance("workflow_pool_batch")
async def submit_batch_tasks(questions: List[str], 
                            callback: Optional[Union[Callable[[List[Any]], None], Callable[[List[Any]], Awaitable[None]]]] = None,
                            metadata_list: Optional[List[Dict[str, Any]]] = None) -> None:
    """Submit a batch of research questions via the module-level pool.

    Args:
        questions: The research questions to process.
        callback: Sync or async callable invoked with the list of results.
        metadata_list: Per-question metadata, parallel to ``questions``.
    """
    pool = global_workflow_pool
    await pool.submit_batch(questions, callback, metadata_list)

async def initialize_workflow_pool():
    """Bring the module-level workflow pool online and log readiness."""
    pool = global_workflow_pool
    await pool.start()
    logger.info("全局工作流池已初始化")

async def shutdown_workflow_pool():
    """Stop the module-level workflow pool and log the shutdown."""
    pool = global_workflow_pool
    await pool.stop()
    logger.info("全局工作流池已关闭")