"""
工作流执行器

实现工作流的执行逻辑和调度。
"""

import asyncio
from typing import Dict, Any, List, Optional, Set, Callable
from datetime import datetime
from enum import Enum
import logging

from .workflow import WorkflowDefinition
from .context import WorkflowContext
from .nodes import WorkflowNode, NodeStatus, NodeType


class ExecutionStatus(Enum):
    """Lifecycle states of a workflow execution.

    The executor starts in ``PENDING``, moves to ``RUNNING`` when a
    workflow is submitted, and ends in exactly one of ``COMPLETED``,
    ``FAILED`` or ``CANCELLED``.  ``PAUSED`` is reserved for the (not yet
    implemented) pause/resume feature.
    """
    PENDING = "pending"          # waiting to start
    RUNNING = "running"          # currently executing
    COMPLETED = "completed"      # finished successfully
    FAILED = "failed"           # finished with an error
    CANCELLED = "cancelled"     # execution was cancelled
    PAUSED = "paused"           # execution paused (not yet implemented)

class ExecutionMode(Enum):
    """How nodes of a workflow may be scheduled relative to each other."""
    SEQUENTIAL = "sequential"    # one node at a time
    PARALLEL = "parallel"       # all ready nodes concurrently
    MIXED = "mixed"             # combination of the two

class WorkflowExecutor:
    """
    Workflow executor.

    Executes a :class:`WorkflowDefinition` level by level (the order is
    supplied by ``workflow.get_execution_order()``): nodes within a level
    run concurrently, bounded by ``max_concurrent_nodes``, and workflow /
    node lifecycle callbacks are fired along the way.

    A single executor instance keeps lifetime counters in
    ``execution_stats`` across repeated ``execute_workflow`` calls.
    """

    def __init__(self,
                 max_concurrent_nodes: int = 10,
                 execution_timeout: float = 3600,
                 retry_failed_nodes: bool = True,
                 max_retries: int = 3):
        """
        Args:
            max_concurrent_nodes: Maximum number of nodes running
                concurrently within one level.
            execution_timeout: Timeout in seconds applied to EACH node's
                ``execute()`` call (not to the workflow as a whole).
            retry_failed_nodes: Whether to retry nodes that raised an
                exception (timeouts are not retried).
            max_retries: Maximum retry attempts per node.
        """
        self.max_concurrent_nodes = max_concurrent_nodes
        self.execution_timeout = execution_timeout
        self.retry_failed_nodes = retry_failed_nodes
        self.max_retries = max_retries

        # Execution state for the currently running workflow (if any).
        self.status = ExecutionStatus.PENDING
        self.current_workflow: Optional[WorkflowDefinition] = None
        self.current_context: Optional[WorkflowContext] = None

        # Node bookkeeping — sets of node ids.
        self.running_nodes: Set[str] = set()
        self.completed_nodes: Set[str] = set()
        self.failed_nodes: Set[str] = set()
        self.cancelled_nodes: Set[str] = set()

        # Lifecycle callbacks; each may be sync or async.  Exceptions
        # raised by callbacks are logged and swallowed (see _safe_callback).
        self.on_workflow_start: Optional[Callable] = None
        self.on_workflow_complete: Optional[Callable] = None
        self.on_workflow_failed: Optional[Callable] = None
        self.on_node_start: Optional[Callable] = None
        self.on_node_complete: Optional[Callable] = None
        self.on_node_failed: Optional[Callable] = None

        # Logging
        self.logger = logging.getLogger("workflow.executor")

        # Lifetime counters across all execute_workflow() calls.
        self.execution_stats = {
            'total_executions': 0,
            'successful_executions': 0,
            'failed_executions': 0,
            'cancelled_executions': 0
        }

    async def execute_workflow(self,
                             workflow: WorkflowDefinition,
                             initial_data: Optional[Dict[str, Any]] = None,
                             context: Optional[WorkflowContext] = None) -> Dict[str, Any]:
        """
        Execute a workflow.

        Args:
            workflow: The workflow definition to run.
            initial_data: Initial data merged into the context.
            context: Optional pre-built execution context; when omitted a
                fresh one is created from the workflow's id and config.

        Returns:
            A result dict containing the output-node results, an execution
            summary and the final context data (see
            :meth:`_collect_final_results`).

        Raises:
            ValueError: If ``workflow.validate()`` reports errors.
            asyncio.CancelledError: If execution is cancelled.
            Exception: The first unrecoverable node/level failure.
        """
        # Validate the definition before touching any state.
        errors = workflow.validate()
        if errors:
            raise ValueError(f"工作流验证失败: {errors}")

        # Create a context if the caller did not supply one; otherwise
        # merge the initial data into the provided context.
        if context is None:
            context = WorkflowContext(
                workflow_id=workflow.workflow_id,
                initial_data=initial_data or {},
                config=workflow.config
            )
        else:
            if initial_data:
                context.update_data(initial_data)

        # Mark this workflow as the one currently being executed.
        self.current_workflow = workflow
        self.current_context = context
        self.status = ExecutionStatus.RUNNING

        # Reset node state from any previous run of this definition.
        workflow.reset_all_nodes()
        self._reset_execution_state()

        # Fire the start callback before any node runs.
        if self.on_workflow_start:
            await self._safe_callback(self.on_workflow_start, workflow, context)

        self.logger.info(f"开始执行工作流: {workflow.name}")

        try:
            result = await self._execute_workflow_internal(workflow, context)

            self.execution_stats['total_executions'] += 1
            self.execution_stats['successful_executions'] += 1

            self.status = ExecutionStatus.COMPLETED
            self.logger.info(f"工作流执行完成: {workflow.name}")

            if self.on_workflow_complete:
                await self._safe_callback(self.on_workflow_complete, workflow, context, result)

            return result

        except asyncio.CancelledError:
            self.status = ExecutionStatus.CANCELLED
            self.execution_stats['total_executions'] += 1
            self.execution_stats['cancelled_executions'] += 1
            self.logger.info(f"工作流执行被取消: {workflow.name}")
            raise

        except Exception as e:
            self.status = ExecutionStatus.FAILED
            self.execution_stats['total_executions'] += 1
            self.execution_stats['failed_executions'] += 1

            context.add_error("workflow", str(e), "execution_error")
            self.logger.error(f"工作流执行失败: {workflow.name} - {e}")

            if self.on_workflow_failed:
                await self._safe_callback(self.on_workflow_failed, workflow, context, e)

            raise

        finally:
            # Always detach from the workflow, whatever the outcome.
            self.current_workflow = None
            self.current_context = None

    async def _execute_workflow_internal(self,
                                       workflow: WorkflowDefinition,
                                       context: WorkflowContext) -> Dict[str, Any]:
        """Run all execution levels in order and collect final results.

        Raises RuntimeError when no execution order can be derived or when
        a level contains failed nodes and ``continue_on_error`` is not set.
        """
        execution_levels = workflow.get_execution_order()

        if not execution_levels:
            raise RuntimeError("无法确定工作流执行顺序")

        self.logger.debug(f"执行层级: {len(execution_levels)}")

        for level_index, level_nodes in enumerate(execution_levels):
            self.logger.debug(f"执行层级 {level_index + 1}: {[node.name for node in level_nodes]}")

            # Nodes within one level have no dependencies on each other,
            # so they run concurrently.
            await self._execute_level(level_nodes, context)

            # Abort between levels on failure unless configured otherwise.
            failed_in_level = [node for node in level_nodes if node.status == NodeStatus.FAILED]
            if failed_in_level and not workflow.config.get('continue_on_error', False):
                failed_names = [node.name for node in failed_in_level]
                raise RuntimeError(f"层级 {level_index + 1} 中的节点执行失败: {failed_names}")

        return self._collect_final_results(workflow, context)

    async def _execute_level(self, nodes: List[WorkflowNode], context: WorkflowContext) -> None:
        """Execute all ready nodes of one level concurrently.

        Concurrency is bounded by a semaphore of ``max_concurrent_nodes``.
        Node failures are recorded on the nodes themselves; cancellation
        is re-raised so it propagates to the caller.
        """
        # Only run nodes that are ready and not already finished.
        ready_nodes = [node for node in nodes if node.is_ready and not node.is_finished]

        if not ready_nodes:
            return

        tasks = []
        semaphore = asyncio.Semaphore(self.max_concurrent_nodes)

        for node in ready_nodes:
            task = asyncio.create_task(
                self._execute_node_with_semaphore(node, context, semaphore)
            )
            tasks.append(task)

        if tasks:
            results = await asyncio.gather(*tasks, return_exceptions=True)
            # BUGFIX: with return_exceptions=True, gather also captures the
            # CancelledError re-raised inside _execute_single_node, which
            # previously stopped workflow cancellation from propagating.
            # Re-raise it here; other exceptions are already reflected in
            # node status and handled by the level check in the caller.
            for outcome in results:
                if isinstance(outcome, asyncio.CancelledError):
                    raise outcome

    async def _execute_node_with_semaphore(self,
                                         node: WorkflowNode,
                                         context: WorkflowContext,
                                         semaphore: asyncio.Semaphore) -> None:
        """Execute a node while holding a slot of the concurrency semaphore."""
        async with semaphore:
            await self._execute_single_node(node, context)

    async def _execute_single_node(self, node: WorkflowNode, context: WorkflowContext) -> None:
        """Execute one node: run it, record the outcome, fire callbacks.

        Applies the per-node timeout, records success/timeout/cancel/failure
        in the context, and — for generic failures — retries up to
        ``max_retries`` times when ``retry_failed_nodes`` is enabled and the
        node exposes a ``retry_count`` attribute.
        """
        node_id = node.node_id

        try:
            # Skip nodes that are not ready or already finished; both can
            # legitimately happen when levels are re-entered via retries.
            if not node.is_ready:
                self.logger.warning(f"节点未准备好: {node.name}")
                return

            if node.is_finished:
                self.logger.debug(f"节点已完成: {node.name}")
                return

            self.running_nodes.add(node_id)
            context.set_current_node(node_id)

            if self.on_node_start:
                await self._safe_callback(self.on_node_start, node, context)

            self.logger.debug(f"开始执行节点: {node.name}")

            # Time the node with the event loop's monotonic clock; the
            # previous datetime.utcnow() arithmetic is deprecated and
            # susceptible to wall-clock jumps.
            loop = asyncio.get_running_loop()
            start_time = loop.time()
            result = await asyncio.wait_for(
                node.execute(context),
                timeout=self.execution_timeout
            )
            duration = loop.time() - start_time

            context.add_execution_record(
                node_id=node_id,
                node_name=node.name,
                status='completed',
                duration=duration,
                data=result
            )

            self.completed_nodes.add(node_id)

            if self.on_node_complete:
                await self._safe_callback(self.on_node_complete, node, context, result)

            self.logger.debug(f"节点执行完成: {node.name} ({duration:.3f}s)")

        except asyncio.TimeoutError:
            # Timeouts are terminal: no retry is attempted.
            error_msg = f"节点执行超时: {node.name}"
            node.fail_execution(error_msg)
            self.failed_nodes.add(node_id)
            context.add_error(node_id, error_msg, "timeout_error")

            if self.on_node_failed:
                await self._safe_callback(self.on_node_failed, node, context, error_msg)

            self.logger.error(error_msg)

        except asyncio.CancelledError:
            node.cancel_execution()
            self.cancelled_nodes.add(node_id)
            context.add_execution_record(
                node_id=node_id,
                node_name=node.name,
                status='cancelled'
            )
            self.logger.info(f"节点执行被取消: {node.name}")
            raise

        except Exception as e:
            error_msg = str(e)
            node.fail_execution(error_msg)
            self.failed_nodes.add(node_id)

            context.add_error(node_id, error_msg, "execution_error")
            context.add_execution_record(
                node_id=node_id,
                node_name=node.name,
                status='failed',
                error=error_msg
            )

            if self.on_node_failed:
                await self._safe_callback(self.on_node_failed, node, context, e)

            self.logger.error(f"节点执行失败: {node.name} - {error_msg}")

            # Retry the node if enabled and the node tracks retry attempts.
            if self.retry_failed_nodes and hasattr(node, 'retry_count'):
                retry_count = getattr(node, 'retry_count', 0)
                if retry_count < self.max_retries:
                    self.logger.info(f"重试节点: {node.name} (第 {retry_count + 1} 次)")
                    setattr(node, 'retry_count', retry_count + 1)
                    # BUGFIX: un-mark the node as failed before retrying so
                    # a successful retry is not still counted as a failure
                    # in get_execution_status().
                    self.failed_nodes.discard(node_id)
                    node.reset()
                    await asyncio.sleep(1)  # brief backoff before retrying
                    await self._execute_single_node(node, context)
                    return

            # Out of retries (or retries disabled): propagate unless the
            # workflow is configured to continue on error.
            if not self.current_workflow.config.get('continue_on_error', False):
                raise

        finally:
            # Whatever happened, the node is no longer running.
            self.running_nodes.discard(node_id)

    def _collect_final_results(self,
                             workflow: WorkflowDefinition,
                             context: WorkflowContext) -> Dict[str, Any]:
        """Build the result payload from completed OUTPUT (or exit) nodes.

        Falls back to the workflow's exit nodes when no OUTPUT node
        completed, then attaches the context's execution summary and data.
        """
        output_nodes = workflow.get_nodes_by_type(NodeType.OUTPUT)

        final_results = {}

        for output_node in output_nodes:
            if output_node.status == NodeStatus.COMPLETED:
                node_output = output_node.get_output_data()
                final_results[output_node.name] = node_output

        # No OUTPUT nodes produced data — fall back to the exit nodes.
        if not final_results:
            exit_nodes = workflow.get_exit_nodes()
            for exit_node in exit_nodes:
                if exit_node.status == NodeStatus.COMPLETED:
                    node_output = exit_node.get_output_data()
                    final_results[exit_node.name] = node_output

        execution_summary = context.get_execution_summary()

        return {
            'workflow_id': workflow.workflow_id,
            'workflow_name': workflow.name,
            'execution_status': self.status.value,
            'results': final_results,
            'execution_summary': execution_summary,
            'context_data': context.get_all_data()
        }

    def _reset_execution_state(self) -> None:
        """Clear per-run node bookkeeping before a new execution."""
        self.running_nodes.clear()
        self.completed_nodes.clear()
        self.failed_nodes.clear()
        self.cancelled_nodes.clear()

    async def _safe_callback(self, callback: Callable, *args, **kwargs) -> None:
        """Invoke a sync or async callback, logging (not raising) errors."""
        try:
            if asyncio.iscoroutinefunction(callback):
                await callback(*args, **kwargs)
            else:
                callback(*args, **kwargs)
        except Exception as e:
            self.logger.error(f"回调函数执行失败: {e}")

    def get_execution_status(self) -> Dict[str, Any]:
        """Return a snapshot of the executor's current state and counters."""
        return {
            'status': self.status.value,
            'current_workflow': self.current_workflow.name if self.current_workflow else None,
            'running_nodes': len(self.running_nodes),
            'completed_nodes': len(self.completed_nodes),
            'failed_nodes': len(self.failed_nodes),
            'cancelled_nodes': len(self.cancelled_nodes),
            'execution_stats': self.execution_stats.copy()
        }

    def cancel_execution(self) -> None:
        """Mark the current execution as cancelled.

        NOTE(review): this only flips the status flag; it does not cancel
        in-flight node tasks.  Callers that need hard cancellation should
        cancel the task awaiting execute_workflow().
        """
        if self.status == ExecutionStatus.RUNNING:
            self.status = ExecutionStatus.CANCELLED
            self.logger.info("工作流执行被取消")

    def pause_execution(self) -> None:
        """Pause execution (not yet implemented)."""
        # TODO: implement pause support
        pass

    def resume_execution(self) -> None:
        """Resume execution (not yet implemented)."""
        # TODO: implement resume support
        pass
