"""
主查询执行器
整合所有组件，提供统一的查询执行接口
"""

import uuid
import time
from typing import Dict, List, Any, Optional, Iterator
import threading

from .types import (
    Record, RecordBatch, ExecutionContext, ExecutionStats,
    OperatorType, JoinType, AggregateFunction, SortDirection,
    QueryExecutionError, TimeoutException
)
from .query_plan import QueryPlan, ExecutionNode, PlanNode
from .executor_base import ExecutorBase, ExecutionPipeline
from .memory_manager import get_memory_manager
from .transaction_manager import get_transaction_manager
from .statistics import get_statistics

# Import all operators
from .operators.scan_operators import TableScanOperator, IndexScanOperator
from .operators.join_operators import HashJoinOperator, NestedLoopJoinOperator, SortMergeJoinOperator
from .operators.filter_operators import FilterOperator
from .operators.project_operators import ProjectOperator
from .operators.aggregate_operators import HashAggregateOperator, SortAggregateOperator
from .operators.sort_operators import SortOperator
from .operators.limit_operators import LimitOperator
from .operators.set_operators import UnionOperator, IntersectOperator, ExceptOperator
from .operators.ddl_operators import CreateTableOperator, DropTableOperator, InsertOperator, UpdateOperator, DeleteOperator, CreateDatabaseOperator, DropDatabaseOperator, UseDatabaseOperator, ListDatabasesOperator, CreateUserOperator, DropUserOperator
from .operators.grant_operators import GrantOperator, RevokeOperator

class QueryExecutor:
    """Main query executor.

    Turns a validated :class:`QueryPlan` into an executor tree, runs it
    through an :class:`ExecutionPipeline`, and streams result batches to
    the caller while recording statistics and tracking active queries.
    """

    def __init__(self, storage_engine=None):
        # Shared process-wide singletons for accounting and bookkeeping.
        self.memory_manager = get_memory_manager()
        self.transaction_manager = get_transaction_manager()
        self.statistics = get_statistics()
        self.storage_engine = storage_engine
        # Maps query_id -> running pipeline; all access guarded by self.lock.
        self.active_queries: Dict[str, ExecutionPipeline] = {}
        # RLock because shutdown() calls cancel_query() while holding it.
        self.lock = threading.RLock()

    def execute_plan(self, query_plan: QueryPlan,
                    context: ExecutionContext = None) -> Iterator[RecordBatch]:
        """Execute a query plan, yielding :class:`RecordBatch` results.

        Args:
            query_plan: Validated plan whose root node drives execution.
            context: Optional execution context; a fresh one is created
                when omitted.

        Yields:
            RecordBatch: Successive batches produced by the pipeline.

        Raises:
            QueryExecutionError: If the plan is empty/invalid or any
                operator fails during execution.
        """
        if not query_plan.root_node:
            raise QueryExecutionError("Query plan has no root node")

        # Create an execution context if the caller did not supply one.
        if context is None:
            context = ExecutionContext()

        query_id = str(uuid.uuid4())

        try:
            # Record query start.
            # TODO(review): the query type is hardcoded to "SELECT" even for
            # DDL/DML plans -- confirm whether statistics consumers care.
            self.statistics.record_query_start(query_id, "SELECT")

            # Validate the query plan before building executors.
            validation_errors = query_plan.validate()
            if validation_errors:
                raise QueryExecutionError(f"Invalid query plan: {'; '.join(validation_errors)}")

            # Build the executor tree from the plan tree.
            execution_tree = self._build_execution_tree(query_plan.root_node, context)

            # Create the execution pipeline.
            pipeline = ExecutionPipeline(f"pipeline_{query_id}", context)
            pipeline.add_executor(execution_tree)

            # Register the query as active so it can be cancelled/inspected.
            with self.lock:
                self.active_queries[query_id] = pipeline

            # Execute the query, counting rows as batches stream out.
            total_rows_returned = 0
            for batch in pipeline.execute():
                total_rows_returned += len(batch)
                yield batch

            # Update statistics on successful completion.
            self.statistics.update_query_stats(query_id, rows_returned=total_rows_returned)
            self.statistics.record_query_end(query_id, "completed")

        except QueryExecutionError as e:
            # Already our domain exception: record the failure and re-raise
            # as-is instead of wrapping it in a second QueryExecutionError
            # (which produced nested "Query execution failed: ..." messages).
            self.statistics.record_query_end(query_id, "failed", str(e))
            raise

        except Exception as e:
            # Record the failure and wrap unexpected errors in the domain
            # exception, preserving the original cause for debugging.
            self.statistics.record_query_end(query_id, "failed", str(e))
            raise QueryExecutionError(f"Query execution failed: {e}") from e

        finally:
            # Always deregister the query, whether it succeeded or failed.
            with self.lock:
                self.active_queries.pop(query_id, None)

    def _build_execution_tree(self, plan_node: PlanNode,
                             context: ExecutionContext) -> ExecutorBase:
        """Recursively build the executor tree for *plan_node*.

        Children are built first, then dispatched to the factory method
        matching the node's operator type. Raises QueryExecutionError for
        unknown operator types or wrong child counts.
        """
        operator_type = plan_node.operator_type
        executor_id = f"exec_{plan_node.node_id}"

        # Recursively build child executors.
        child_executors = []
        for child_node in plan_node.children:
            child_executor = self._build_execution_tree(child_node, context)
            child_executors.append(child_executor)

        # Create the executor according to the operator type.
        if operator_type == OperatorType.TABLE_SCAN:
            return self._create_table_scan_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.INDEX_SCAN:
            return self._create_index_scan_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.FILTER:
            if len(child_executors) != 1:
                raise QueryExecutionError("Filter operator requires exactly one child")
            return self._create_filter_executor(executor_id, context, plan_node, child_executors[0])

        elif operator_type == OperatorType.PROJECT:
            if len(child_executors) != 1:
                raise QueryExecutionError("Project operator requires exactly one child")
            return self._create_project_executor(executor_id, context, plan_node, child_executors[0])

        elif operator_type == OperatorType.HASH_JOIN:
            if len(child_executors) != 2:
                raise QueryExecutionError("Join operator requires exactly two children")
            return self._create_hash_join_executor(executor_id, context, plan_node,
                                                 child_executors[0], child_executors[1])

        elif operator_type == OperatorType.NESTED_LOOP_JOIN:
            if len(child_executors) != 2:
                raise QueryExecutionError("Join operator requires exactly two children")
            return self._create_nested_loop_join_executor(executor_id, context, plan_node,
                                                        child_executors[0], child_executors[1])

        elif operator_type == OperatorType.SORT_MERGE_JOIN:
            if len(child_executors) != 2:
                raise QueryExecutionError("Join operator requires exactly two children")
            return self._create_sort_merge_join_executor(executor_id, context, plan_node,
                                                       child_executors[0], child_executors[1])

        elif operator_type == OperatorType.HASH_AGGREGATE:
            if len(child_executors) != 1:
                raise QueryExecutionError("Aggregate operator requires exactly one child")
            return self._create_hash_aggregate_executor(executor_id, context, plan_node, child_executors[0])

        elif operator_type == OperatorType.SORT_AGGREGATE:
            if len(child_executors) != 1:
                raise QueryExecutionError("Aggregate operator requires exactly one child")
            return self._create_sort_aggregate_executor(executor_id, context, plan_node, child_executors[0])

        elif operator_type == OperatorType.SORT:
            if len(child_executors) != 1:
                raise QueryExecutionError("Sort operator requires exactly one child")
            return self._create_sort_executor(executor_id, context, plan_node, child_executors[0])

        elif operator_type == OperatorType.LIMIT:
            if len(child_executors) != 1:
                raise QueryExecutionError("Limit operator requires exactly one child")
            return self._create_limit_executor(executor_id, context, plan_node, child_executors[0])

        elif operator_type == OperatorType.UNION:
            if len(child_executors) != 2:
                raise QueryExecutionError("Union operator requires exactly two children")
            return self._create_union_executor(executor_id, context, plan_node,
                                             child_executors[0], child_executors[1])

        elif operator_type == OperatorType.INTERSECT:
            if len(child_executors) != 2:
                raise QueryExecutionError("Intersect operator requires exactly two children")
            return self._create_intersect_executor(executor_id, context, plan_node,
                                                 child_executors[0], child_executors[1])

        elif operator_type == OperatorType.EXCEPT:
            if len(child_executors) != 2:
                raise QueryExecutionError("Except operator requires exactly two children")
            return self._create_except_executor(executor_id, context, plan_node,
                                              child_executors[0], child_executors[1])

        # DDL operations
        elif operator_type == OperatorType.CREATE_TABLE:
            return self._create_create_table_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.DROP_TABLE:
            return self._create_drop_table_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.INSERT:
            return self._create_insert_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.UPDATE:
            return self._create_update_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.DELETE:
            # Pass the already-built child (if any) instead of letting the
            # factory rebuild the same subtree a second time.
            return self._create_delete_executor(executor_id, context, plan_node,
                                                child_executors[0] if child_executors else None)

        elif operator_type == OperatorType.GRANT:
            return self._create_grant_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.REVOKE:
            return self._create_revoke_executor(executor_id, context, plan_node)

        # Database-level DDL operations
        elif operator_type == OperatorType.CREATE_DATABASE:
            return self._create_create_database_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.DROP_DATABASE:
            return self._create_drop_database_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.USE_DATABASE:
            return self._create_use_database_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.LIST_DATABASES:
            return self._create_list_databases_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.CREATE_USER:
            return self._create_create_user_executor(executor_id, context, plan_node)

        elif operator_type == OperatorType.DROP_USER:
            return self._create_drop_user_executor(executor_id, context, plan_node)

        else:
            raise QueryExecutionError(f"Unsupported operator type: {operator_type}")

    def _create_table_scan_executor(self, executor_id: str, context: ExecutionContext,
                                   plan_node: PlanNode) -> TableScanOperator:
        """Create a table scan executor."""
        table_name = plan_node.get_property('table_name')
        if not table_name:
            raise QueryExecutionError("Table scan requires table_name property")

        # Fall back to the table name when no alias was given.
        table_alias = plan_node.get_property('alias') or table_name

        # Look up table metadata if a storage engine is available.
        table_info = None
        if self.storage_engine:
            table_info = self.storage_engine.get_table_info(table_name)

        # Create the scan operator, passing the storage engine and alias.
        scan_operator = TableScanOperator(executor_id, context, table_name,
                                        table_info, self.storage_engine, table_alias)

        # Push down predicate and projection when present on the plan node.
        predicate = plan_node.get_property('predicate')
        if predicate:
            scan_operator.set_predicate_string(str(predicate))

        columns = plan_node.get_property('columns')
        if columns:
            scan_operator.set_projection(columns)

        return scan_operator

    def _create_index_scan_executor(self, executor_id: str, context: ExecutionContext,
                                   plan_node: PlanNode) -> IndexScanOperator:
        """Create an index scan executor."""
        table_name = plan_node.get_property('table_name')
        index_name = plan_node.get_property('index_name')

        if not table_name or not index_name:
            raise QueryExecutionError("Index scan requires table_name and index_name properties")

        # Build a synthetic IndexInfo from the plan node's properties.
        from .types import IndexInfo
        index_info = IndexInfo(
            name=index_name,
            table_name=table_name,
            columns=plan_node.get_property('index_columns', ['id'])
        )

        return IndexScanOperator(executor_id, context, table_name, index_info)

    def _create_filter_executor(self, executor_id: str, context: ExecutionContext,
                               plan_node: PlanNode, child: ExecutorBase) -> FilterOperator:
        """Create a filter executor."""
        predicate = plan_node.get_property('predicate')
        if not predicate:
            raise QueryExecutionError("Filter operator requires predicate property")

        return FilterOperator(executor_id, context, child, predicate)

    def _create_project_executor(self, executor_id: str, context: ExecutionContext,
                                plan_node: PlanNode, child: ExecutorBase) -> ProjectOperator:
        """Create a projection executor.

        Normalizes column specs (dicts with optional alias, plain strings,
        or anything else) into a list of "col" / "col AS alias" strings.
        """
        columns = plan_node.get_property('columns')
        if not columns:
            raise QueryExecutionError("Project operator requires columns property")

        # Convert column specs from dict/other forms to plain strings.
        column_strings = []
        for col in columns:
            if isinstance(col, dict):
                # Dict-shaped column spec: {'column': ..., 'alias': ...}.
                column = col.get('column', '')
                alias = col.get('alias')
                if alias and alias != column:
                    column_strings.append(f"{column} AS {alias}")
                else:
                    column_strings.append(column)
            elif isinstance(col, str):
                # Already a string spec; use as-is.
                column_strings.append(col)
            else:
                # Any other type: fall back to str().
                column_strings.append(str(col))

        return ProjectOperator(executor_id, context, child, column_strings)

    def _create_hash_join_executor(self, executor_id: str, context: ExecutionContext,
                                  plan_node: PlanNode, left_child: ExecutorBase,
                                  right_child: ExecutorBase) -> HashJoinOperator:
        """Create a hash join executor."""
        join_type = plan_node.get_property('join_type', JoinType.INNER)
        join_condition = plan_node.get_property('join_condition')

        if not join_condition:
            raise QueryExecutionError("Join operator requires join_condition property")

        return HashJoinOperator(executor_id, context, left_child, right_child,
                              join_type, join_condition)

    def _create_nested_loop_join_executor(self, executor_id: str, context: ExecutionContext,
                                         plan_node: PlanNode, left_child: ExecutorBase,
                                         right_child: ExecutorBase) -> NestedLoopJoinOperator:
        """Create a nested-loop join executor."""
        join_type = plan_node.get_property('join_type', JoinType.INNER)
        join_condition = plan_node.get_property('join_condition')

        if not join_condition:
            raise QueryExecutionError("Join operator requires join_condition property")

        return NestedLoopJoinOperator(executor_id, context, left_child, right_child,
                                    join_type, join_condition)

    def _create_sort_merge_join_executor(self, executor_id: str, context: ExecutionContext,
                                        plan_node: PlanNode, left_child: ExecutorBase,
                                        right_child: ExecutorBase) -> SortMergeJoinOperator:
        """Create a sort-merge join executor."""
        join_type = plan_node.get_property('join_type', JoinType.INNER)
        join_condition = plan_node.get_property('join_condition')

        if not join_condition:
            raise QueryExecutionError("Join operator requires join_condition property")

        return SortMergeJoinOperator(executor_id, context, left_child, right_child,
                                   join_type, join_condition)

    def _create_hash_aggregate_executor(self, executor_id: str, context: ExecutionContext,
                                       plan_node: PlanNode, child: ExecutorBase) -> HashAggregateOperator:
        """Create a hash aggregate executor."""
        group_by = plan_node.get_property('group_by', [])
        aggregates = plan_node.get_property('aggregates', [])

        return HashAggregateOperator(executor_id, context, child, group_by, aggregates)

    def _create_sort_aggregate_executor(self, executor_id: str, context: ExecutionContext,
                                       plan_node: PlanNode, child: ExecutorBase) -> SortAggregateOperator:
        """Create a sort aggregate executor."""
        group_by = plan_node.get_property('group_by', [])
        aggregates = plan_node.get_property('aggregates', [])

        return SortAggregateOperator(executor_id, context, child, group_by, aggregates)

    def _create_sort_executor(self, executor_id: str, context: ExecutionContext,
                             plan_node: PlanNode, child: ExecutorBase) -> SortOperator:
        """Create a sort executor."""
        order_by = plan_node.get_property('order_by')
        if not order_by:
            raise QueryExecutionError("Sort operator requires order_by property")

        return SortOperator(executor_id, context, child, order_by)

    def _create_union_executor(self, executor_id: str, context: ExecutionContext,
                              plan_node: PlanNode, left_child: ExecutorBase,
                              right_child: ExecutorBase) -> UnionOperator:
        """Create a UNION executor."""
        distinct = plan_node.get_property('distinct', True)
        return UnionOperator(executor_id, context, left_child, right_child, distinct)

    def _create_intersect_executor(self, executor_id: str, context: ExecutionContext,
                                  plan_node: PlanNode, left_child: ExecutorBase,
                                  right_child: ExecutorBase) -> IntersectOperator:
        """Create an INTERSECT executor."""
        distinct = plan_node.get_property('distinct', True)
        return IntersectOperator(executor_id, context, left_child, right_child, distinct)

    def _create_except_executor(self, executor_id: str, context: ExecutionContext,
                               plan_node: PlanNode, left_child: ExecutorBase,
                               right_child: ExecutorBase) -> ExceptOperator:
        """Create an EXCEPT executor."""
        distinct = plan_node.get_property('distinct', True)
        return ExceptOperator(executor_id, context, left_child, right_child, distinct)

    def _create_create_table_executor(self, executor_id: str, context: ExecutionContext,
                                     plan_node: PlanNode) -> CreateTableOperator:
        """Create a CREATE TABLE executor."""
        table_name = plan_node.get_property('table_name')
        columns = plan_node.get_property('columns', [])

        if not table_name:
            raise QueryExecutionError("CREATE TABLE requires table_name property")
        if not columns:
            raise QueryExecutionError("CREATE TABLE requires columns property")

        return CreateTableOperator(executor_id, context, table_name, columns, self.storage_engine)

    def _create_drop_table_executor(self, executor_id: str, context: ExecutionContext,
                                   plan_node: PlanNode) -> DropTableOperator:
        """Create a DROP TABLE executor."""
        table_name = plan_node.get_property('table_name')

        if not table_name:
            raise QueryExecutionError("DROP TABLE requires table_name property")

        return DropTableOperator(executor_id, context, table_name, self.storage_engine)

    def _create_insert_executor(self, executor_id: str, context: ExecutionContext,
                               plan_node: PlanNode) -> InsertOperator:
        """Create an INSERT executor."""
        table_name = plan_node.get_property('table_name')
        values = plan_node.get_property('values', [])

        if not table_name:
            raise QueryExecutionError("INSERT requires table_name property")
        if not values:
            raise QueryExecutionError("INSERT requires values property")

        return InsertOperator(executor_id, context, table_name, values, self.storage_engine)

    def _create_update_executor(self, executor_id: str, context: ExecutionContext,
                               plan_node: PlanNode) -> UpdateOperator:
        """Create an UPDATE executor."""
        table_name = plan_node.get_property('table_name')
        set_clauses = plan_node.get_property('set_clauses', [])
        where_condition = plan_node.get_property('where_condition')

        if not table_name:
            raise QueryExecutionError("UPDATE requires table_name property")
        if not set_clauses:
            raise QueryExecutionError("UPDATE requires set_clauses property")

        return UpdateOperator(executor_id, context, table_name, set_clauses, where_condition, self.storage_engine)

    def _create_delete_executor(self, executor_id: str, context: ExecutionContext,
                               plan_node: PlanNode,
                               child_executor: ExecutorBase = None) -> DeleteOperator:
        """Create a DELETE executor.

        Args:
            child_executor: Pre-built child executor, if the caller already
                constructed one. When None and the plan node has children,
                the first child's subtree is built here (previously this
                subtree was always rebuilt, duplicating the one built by
                _build_execution_tree).
        """
        table_name = plan_node.get_property('table_name')
        where_condition = plan_node.get_property('where_condition')

        if not table_name:
            raise QueryExecutionError("DELETE requires table_name property")

        # Build the child executor only if one was not supplied.
        if child_executor is None and plan_node.children:
            child_executor = self._build_execution_tree(plan_node.children[0], context)

        return DeleteOperator(executor_id, context, table_name, where_condition,
                            self.storage_engine, child_executor)

    def _create_limit_executor(self, executor_id: str, context: ExecutionContext,
                              plan_node: PlanNode, child: ExecutorBase) -> LimitOperator:
        """Create a LIMIT executor."""
        # Try the 'limit' property first (used by the physical plan optimizer).
        limit = plan_node.get_property('limit')
        if limit is None:
            # Fall back to the 'count' property (used by the logical plan).
            limit = plan_node.get_property('count')
            if limit is None:
                raise QueryExecutionError("Limit operator requires count or limit property")

        offset = plan_node.get_property('offset', 0)

        return LimitOperator(executor_id, context, child, limit, offset)

    def _create_grant_executor(self, executor_id: str, context: ExecutionContext,
                              plan_node: PlanNode) -> GrantOperator:
        """Create a GRANT executor."""
        privileges = plan_node.get_property('privileges')
        object_type = plan_node.get_property('object_type')
        object_name = plan_node.get_property('object_name')
        grantees = plan_node.get_property('grantees')
        grant_option = plan_node.get_property('grant_option')

        if not privileges:
            raise QueryExecutionError("GRANT requires privileges property")
        if not object_type:
            raise QueryExecutionError("GRANT requires object_type property")
        if not object_name:
            raise QueryExecutionError("GRANT requires object_name property")
        if not grantees:
            raise QueryExecutionError("GRANT requires grantees property")

        return GrantOperator(executor_id, context, privileges, object_type, object_name,
                           grantees, grant_option, self.storage_engine)

    def _create_revoke_executor(self, executor_id: str, context: ExecutionContext,
                               plan_node: PlanNode) -> RevokeOperator:
        """Create a REVOKE executor."""
        privileges = plan_node.get_property('privileges')
        object_type = plan_node.get_property('object_type')
        object_name = plan_node.get_property('object_name')
        grantees = plan_node.get_property('grantees')

        if not privileges:
            raise QueryExecutionError("REVOKE requires privileges property")
        if not object_type:
            raise QueryExecutionError("REVOKE requires object_type property")
        if not object_name:
            raise QueryExecutionError("REVOKE requires object_name property")
        if not grantees:
            raise QueryExecutionError("REVOKE requires grantees property")

        return RevokeOperator(executor_id, context, privileges, object_type, object_name,
                            grantees, self.storage_engine)

    def _create_create_database_executor(self, executor_id: str, context: ExecutionContext,
                                        plan_node: PlanNode) -> CreateDatabaseOperator:
        """Create a CREATE DATABASE executor."""
        database_name = plan_node.get_property('database_name')
        charset = plan_node.get_property('charset', 'utf8')

        if not database_name:
            raise QueryExecutionError("CREATE DATABASE requires database_name property")

        return CreateDatabaseOperator(executor_id, context, database_name, charset, self.storage_engine)

    def _create_drop_database_executor(self, executor_id: str, context: ExecutionContext,
                                      plan_node: PlanNode) -> DropDatabaseOperator:
        """Create a DROP DATABASE executor."""
        database_name = plan_node.get_property('database_name')
        force = plan_node.get_property('force', False)

        if not database_name:
            raise QueryExecutionError("DROP DATABASE requires database_name property")

        return DropDatabaseOperator(executor_id, context, database_name, force, self.storage_engine)

    def _create_use_database_executor(self, executor_id: str, context: ExecutionContext,
                                     plan_node: PlanNode) -> UseDatabaseOperator:
        """Create a USE DATABASE executor."""
        database_name = plan_node.get_property('database_name')

        if not database_name:
            raise QueryExecutionError("USE DATABASE requires database_name property")

        return UseDatabaseOperator(executor_id, context, database_name, self.storage_engine)

    def _create_list_databases_executor(self, executor_id: str, context: ExecutionContext,
                                       plan_node: PlanNode) -> ListDatabasesOperator:
        """Create a LIST DATABASES executor."""
        return ListDatabasesOperator(executor_id, context, self.storage_engine)

    def _create_create_user_executor(self, executor_id: str, context: ExecutionContext,
                                   plan_node: PlanNode) -> CreateUserOperator:
        """Create a CREATE USER executor."""
        username = plan_node.get_property('username')
        password = plan_node.get_property('password')

        # Validate the mandatory property, consistent with the other
        # DDL factories (previously None was silently passed through).
        if not username:
            raise QueryExecutionError("CREATE USER requires username property")

        return CreateUserOperator(executor_id, context, username, password, self.storage_engine)

    def _create_drop_user_executor(self, executor_id: str, context: ExecutionContext,
                                 plan_node: PlanNode) -> DropUserOperator:
        """Create a DROP USER executor."""
        username = plan_node.get_property('username')

        # Validate the mandatory property, consistent with the other
        # DDL factories (previously None was silently passed through).
        if not username:
            raise QueryExecutionError("DROP USER requires username property")

        return DropUserOperator(executor_id, context, username, self.storage_engine)

    def cancel_query(self, query_id: str) -> bool:
        """Cancel an active query.

        Returns:
            bool: True if the query was active and has been stopped,
            False if no such active query exists.
        """
        with self.lock:
            if query_id in self.active_queries:
                pipeline = self.active_queries[query_id]
                pipeline.stop()
                del self.active_queries[query_id]

                # Record the cancellation in statistics.
                self.statistics.record_query_end(query_id, "cancelled")
                return True

            return False

    def get_active_queries(self) -> List[str]:
        """Return the ids of all currently active queries."""
        with self.lock:
            return list(self.active_queries.keys())

    def get_query_status(self, query_id: str) -> Optional[Dict[str, Any]]:
        """Return status information for a query.

        Looks up active queries first, then falls back to historical
        statistics; returns None when the query is unknown.
        """
        with self.lock:
            if query_id in self.active_queries:
                pipeline = self.active_queries[query_id]
                stats = pipeline.get_pipeline_stats()

                return {
                    "query_id": query_id,
                    "status": "running",
                    "pipeline_stats": stats,
                    "start_time": pipeline.context.start_time
                }

            # Check historical queries.
            historical_stats = self.statistics.get_query_statistics(query_id)
            if historical_stats:
                return {
                    "query_id": query_id,
                    "status": historical_stats.get("status", "unknown"),
                    "stats": historical_stats
                }

            return None

    def get_performance_metrics(self) -> Dict[str, Any]:
        """Return an aggregated snapshot of performance metrics."""
        return {
            "summary": self.statistics.get_performance_summary(),
            "slow_queries": self.statistics.get_top_slow_queries(10),
            "operator_performance": self.statistics.get_operator_performance(),
            "system_metrics": self.statistics.collector.get_all_metrics(),
            "memory_stats": self.memory_manager.get_memory_stats(),
            "transaction_stats": self.transaction_manager.get_transaction_stats()
        }

    def shutdown(self):
        """Shut down the executor, cancelling all active queries."""
        # Cancel every active query (RLock allows the nested acquisition
        # inside cancel_query()).
        with self.lock:
            query_ids = list(self.active_queries.keys())
            for query_id in query_ids:
                self.cancel_query(query_id)

        # Shut down the statistics module.
        self.statistics.shutdown()

        # Clean up the memory manager.
        self.memory_manager.cleanup()


