"""
任务执行器
"""
import asyncio
import concurrent.futures
from typing import Any, Dict, List, Optional, Set

from core.dag import DAG
from core.logger import get_logger
from core.plugin_manager import PluginManager
from core.resource_manager import ResourceManager
from core.state_manager import StateManager, TaskStatus

# Module-level logger for this executor.
logger = get_logger(__name__)


class PipelineExecutor:
    """Executes a pipeline described by a DAG.

    Nodes become eligible to run once all of their dependencies have
    completed. Eligible nodes execute concurrently (capped at
    ``max_concurrent_tasks``) in single-use worker threads so that blocking
    plugin code never stalls the event loop. Progress, results and errors
    are recorded through the shared ``StateManager``.
    """

    # Fallbacks used when neither the constructor argument nor the DAG's
    # global_config supplies a value.
    DEFAULT_MAX_CONCURRENT_TASKS = 5
    DEFAULT_PIPELINE_TIMEOUT = 300.0

    def __init__(self, dag: "DAG", state_manager: "StateManager",
                 plugin_manager: "PluginManager",
                 max_concurrent_tasks: Optional[int] = None,
                 pipeline_timeout: Optional[float] = None):
        """Create an executor for *dag*.

        Args:
            dag: Dependency graph of the nodes to execute.
            state_manager: Records node/pipeline status and node results.
            plugin_manager: Factory for the plugin backing each node.
            max_concurrent_tasks: Concurrency cap; falls back to
                ``dag.global_config["max_concurrent_tasks"]`` and then 5.
            pipeline_timeout: Wall-clock budget in seconds; falls back to
                ``dag.global_config["pipeline_timeout"]`` and then 300.0.
        """
        self.dag = dag
        self.state_manager = state_manager
        self.plugin_manager = plugin_manager

        # Precedence for both settings: explicit argument > global config
        # > class default.
        self.max_concurrent_tasks = self._resolve_setting(
            max_concurrent_tasks, "max_concurrent_tasks",
            self.DEFAULT_MAX_CONCURRENT_TASKS)
        self.pipeline_timeout = self._resolve_setting(
            pipeline_timeout, "pipeline_timeout",
            self.DEFAULT_PIPELINE_TIMEOUT)

        # Per-node resource limits (CPU / memory / threads) come from here.
        self.resource_manager = ResourceManager(dag.global_config)

        # asyncio.Tasks currently in flight; each task removes itself from
        # this set through a done-callback.
        self._running_tasks: Set[asyncio.Task] = set()

    def _resolve_setting(self, explicit: Any, key: str, default: Any) -> Any:
        """Return *explicit* if given, else ``dag.global_config[key]``, else *default*."""
        if explicit is not None:
            return explicit
        configured = self.dag.global_config.get(key)
        return default if configured is None else configured

    async def execute(self) -> bool:
        """Run the whole pipeline.

        Returns:
            True when every node completed; False on timeout or failure.
            On failure all still-running node tasks are cancelled.
        """
        self.state_manager.set_pipeline_status(TaskStatus.RUNNING)

        try:
            completed_nodes = self.state_manager.get_completed_nodes()

            if self.pipeline_timeout is not None:
                # Abort the whole run if it exceeds the wall-clock budget.
                await asyncio.wait_for(
                    self._execute_pipeline_logic(completed_nodes),
                    timeout=self.pipeline_timeout,
                )
            else:
                await self._execute_pipeline_logic(completed_nodes)

            self.state_manager.set_pipeline_status(TaskStatus.COMPLETED)
            return True

        except asyncio.TimeoutError:
            logger.error(f"Pipeline execution timed out after {self.pipeline_timeout} seconds")
            self.state_manager.set_pipeline_status(TaskStatus.FAILED)
            self._cancel_running_tasks()
            return False
        except Exception as e:
            logger.error(f"Pipeline execution failed: {e}")
            self.state_manager.set_pipeline_status(TaskStatus.FAILED)
            self._cancel_running_tasks()
            return False

    def _cancel_running_tasks(self) -> None:
        """Cancel every node task that has not finished yet."""
        for task in self._running_tasks:
            if not task.done():
                task.cancel()

    async def _execute_pipeline_logic(self, completed_nodes: set) -> None:
        """Core scheduling loop: launch ready nodes until the DAG is complete."""
        while not self.dag.is_complete(completed_nodes):
            ready_nodes = self.dag.get_ready_nodes(completed_nodes)

            if not ready_nodes:
                # Nothing is ready yet (dependencies still running) —
                # poll again shortly.
                await asyncio.sleep(0.1)
                completed_nodes = self.state_manager.get_completed_nodes()
                continue

            # Launch ready nodes without exceeding the concurrency cap.
            # max(0, ...) guards against a negative slice bound, which would
            # silently select the wrong nodes instead of none.
            available_slots = max(
                0, self.max_concurrent_tasks - len(self._running_tasks))
            # Snapshot in-flight node ids once per pass (tasks are named
            # after their node) instead of rebuilding a list per candidate.
            running_names = {task.get_name() for task in self._running_tasks}

            for node_id in ready_nodes[:available_slots]:
                if node_id in completed_nodes or node_id in running_names:
                    continue
                task = asyncio.create_task(
                    self._execute_node_async(node_id),
                    name=node_id,
                )
                self._running_tasks.add(task)
                task.add_done_callback(self._running_tasks.discard)

            # Block until at least one task finishes (or 1s passes) so the
            # loop re-evaluates readiness promptly without busy-spinning.
            if self._running_tasks:
                try:
                    await asyncio.wait(
                        self._running_tasks,
                        return_when=asyncio.FIRST_COMPLETED,
                        timeout=1.0,
                    )
                except asyncio.CancelledError:
                    # The scheduling loop itself was cancelled — stop.
                    break

            # Refresh completion state before the next scheduling pass.
            completed_nodes = self.state_manager.get_completed_nodes()

    async def _execute_node_async(self, node_id: str) -> bool:
        """Run one node off the event loop; returns True on success."""
        # get_running_loop() is the supported API inside a coroutine;
        # get_event_loop() is deprecated in this context since Python 3.10.
        loop = asyncio.get_running_loop()
        try:
            # NOTE(review): a fresh single-thread pool per node keeps
            # blocking plugin code off the event loop; consider sharing a
            # pool if per-node thread startup cost ever matters.
            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as executor:
                return await loop.run_in_executor(
                    executor, self._execute_node_sync, node_id)
        except asyncio.CancelledError:
            logger.info(f"Node {node_id} execution was cancelled")
            self.state_manager.set_node_status(node_id, TaskStatus.FAILED, error="Execution cancelled")
            raise
        except Exception as e:
            # _execute_node_sync records its own failures; this catches
            # unexpected errors from the executor machinery itself.
            logger.error(f"Node {node_id} async execution failed: {e}")
            return False

    def _execute_node_sync(self, node_id: str) -> bool:
        """Execute one node synchronously (runs inside a worker thread).

        Creates the node's plugin, feeds it the results of its
        dependencies, and records status, result and resource usage.

        Returns:
            True on success, False on failure (the error is recorded in
            the state manager either way).
        """
        try:
            self.state_manager.set_node_status(node_id, TaskStatus.RUNNING)

            node_config = self.dag.get_node_config(node_id)
            plugin_type = node_config["plugin_type"]
            plugin_config = node_config.get("config", {})

            # Resolve and apply this node's resource limits before running.
            node_resources = self.resource_manager.get_node_resources(node_id, node_config)
            self.resource_manager.apply_resource_limits(node_id, node_resources)

            plugin = self.plugin_manager.create_plugin(plugin_type, node_id, plugin_config)

            input_data = self._prepare_input_data(node_id)

            result = plugin.execute(input_data)

            # Account for the successful run.
            self.resource_manager.track_resource_usage(node_id, {
                "status": "completed",
                "resources": node_resources,
            })

            self.state_manager.set_node_status(node_id, TaskStatus.COMPLETED, result=result)

            logger.info(f"Node {node_id} completed successfully with resources: CPU={node_resources.get('cpu_limit')}, "
                        f"Memory={node_resources.get('memory_limit')}, Threads={node_resources.get('thread_limit')}")
            return True

        except Exception as e:
            logger.error(f"Node {node_id} execution failed: {e}")
            # NOTE(review): the cancellation path stores a string error while
            # this path stores the exception object — confirm StateManager
            # accepts both.
            self.state_manager.set_node_status(node_id, TaskStatus.FAILED, error=e)

            # Best-effort accounting for the failed run.
            node_config = self.dag.get_node_config(node_id)
            node_resources = self.resource_manager.get_node_resources(node_id, node_config)
            self.resource_manager.track_resource_usage(node_id, {
                "status": "failed",
                "error": str(e),
                "resources": node_resources,
            })

            return False

    def _prepare_input_data(self, node_id: str) -> Any:
        """Assemble a node's input from its dependencies' results.

        Returns:
            None when the node has no dependencies, the single dependency's
            result when it has exactly one, and a ``{dep_id: result}``
            mapping otherwise.
        """
        dependencies = self.dag.get_dependencies(node_id)

        if not dependencies:
            return None

        if len(dependencies) == 1:
            return self.state_manager.get_node_result(dependencies[0])

        return {dep: self.state_manager.get_node_result(dep) for dep in dependencies}