# engine/executor.py
"""
模块职责：查询执行引擎。
它接收一个由编译器(Compiler)生成的、树状的“执行计划”(Execution Plan)，
并根据计划中的操作（如SCAN, JOIN, FILTER等）调用存储引擎(StorageEngine)
提供的API来获取和处理数据，最终产生查询结果。
"""
import os
from pysql_compiler.catalog import TableSchema
from .catalog_manager import CatalogManager
from .storage_engine import StorageEngine
import collections
import re

# 避免循环导入，仅用于类型提示
from typing import TYPE_CHECKING

if TYPE_CHECKING:
    from pysql_compiler.compiler import Compiler

class Executor:
    """
    执行器 (Executor)
    采用火山模型（或迭代器模型），通过递归调用execute方法来执行计划树中的每个节点。
    每个执行节点从其子节点获取数据，进行处理，然后向上层节点返回结果。
    """
    def __init__(self, catalog_manager: CatalogManager, storage_engine: StorageEngine, compiler: 'Compiler'):
        """Wire the executor to the catalog, the storage engine and the compiler."""
        self.catalog_manager = catalog_manager  # table/index metadata (schemas, constraints, pages)
        self.storage_engine = storage_engine  # physical tuple storage: scan/insert/delete API
        self.compiler = compiler  # kept so SOURCE can re-compile statements read from a .sql file

    def execute(self, plan: dict):
        """根据执行计划的 operation 类型，分发到具体的执行方法。"""
        op = plan.get('operation')

        if op == 'SOURCE':
            return self._execute_source(plan)
        if op == 'EXPLAIN':
            return self._execute_explain(plan)

        # DDL (数据定义语言) 操作
        if op == 'CREATE_TABLE':
            return self._execute_create_table(plan)
        if op == 'CREATE_INDEX':
            return self._execute_create_index(plan)
        if op == 'TRUNCATE_TABLE':
            return self._execute_truncate_table(plan)
        if op == 'DROP_TABLE':
            return self._execute_drop_table(plan)
        if op == 'ALTER_TABLE':
            return self._execute_alter_table(plan)

        # DML 操作
        if op == 'INSERT':
            if isinstance(plan, list):
                for sub_plan in plan:
                    self._execute_insert(sub_plan)
                print(f"执行结果: {len(plan)}行数据已插入")
                return []
            else:
                return self._execute_insert(plan)

        if op == 'DELETE':
            return self._execute_delete(plan)
        if op == 'UPDATE':
            return self._execute_update(plan)
        if op == 'SHOW_TABLES':
            return self._execute_show_tables(plan)
        # DQL and Execution Plan 操作
        if op == 'PROJECT':
            return self._execute_project(plan)
        if op == 'AGGREGATE':
            return self._execute_aggregate(plan)
        if op == 'DISTINCT':
            return self._execute_distinct(plan)
        if op == 'SORT':
            return self._execute_sort(plan)
        if op == 'SCAN':
            return self._execute_scan(plan)
        if op == 'INDEX_SCAN':
            return self._execute_index_scan(plan)
        if op == 'FILTER':
            source_plan = plan['source']
            # 检查数据源是否为 SCAN
            if source_plan.get('operation') == 'SCAN':
                # 如果是，直接执行带有谓词的 SCAN，实现过滤和扫描的融合
                return self._execute_scan(source_plan, predicate=plan['predicate'])
            else:
                # 如果数据源不是 SCAN (例如是 JOIN 的结果)，则按原方式过滤
                source_data = self.execute(plan['source'])
                if plan.get('is_having'):
                    output_schema = plan['source'].get('output_schema', [])
                    return self._apply_aggregate_filter(source_data, plan['predicate'], output_schema)
                else:
                    table_name = self._get_table_name_from_plan(plan)
                    return self._apply_filter(source_data, plan['predicate'], table_name)
        # --- 谓词下推优化结束 ---
        if op == 'JOIN':
            return self._execute_block_nested_loop_join(plan)
            # return self._execute_join(plan)

        raise ValueError(f"不支持的执行操作: {op}")

    def _execute_source(self, plan: dict):
        """Run a .sql script file: split it into statements and execute each.

        Statements are separated by ';'; '--' line comments are stripped
        first. Per-statement failures are counted and reported, never
        propagated. Always returns an empty result list.
        """
        filepath = plan['filepath']
        try:
            if not os.path.exists(filepath):
                print(f"[错误] 文件未找到: {filepath}")
                return []

            with open(filepath, 'r', encoding='utf-8') as script_file:
                raw_sql = script_file.read()

            # Strip all '--' line comments before splitting on ';'.
            cleaned_sql = re.sub(r'--.*', '', raw_sql)

            print(f"--- 开始执行文件 '{filepath}' ---")
            ok_count, bad_count = 0, 0

            for statement in cleaned_sql.split(';'):
                stripped = statement.strip()
                # Skip empty fragments (e.g. from a trailing ';').
                if not stripped:
                    continue

                try:
                    print(f"\n正在执行: {stripped};")
                    # Each statement goes through the full compile pipeline again.
                    sub_plan, _ = self.compiler.compile(statement)

                    # A multi-row INSERT compiles to a list of sub-plans.
                    if isinstance(sub_plan, list):
                        for single_plan in sub_plan:
                            self.execute(single_plan)
                    else:
                        self.execute(sub_plan)

                    ok_count += 1
                except Exception as e:
                    bad_count += 1
                    print(f"[语句执行失败] {e}")

            print(f"\n--- 文件 '{filepath}' 执行完毕 ---")
            print(f"总计: {ok_count} 条成功, {bad_count} 条失败。")

        except FileNotFoundError:
            print(f"[错误] 文件未找到: {filepath}")
        except Exception as e:
            print(f"[读取或执行文件时发生严重错误] {e}")

        return []

    def _execute_alter_table(self, plan: dict):
        """
        Execute ALTER TABLE while preserving existing rows.

        Strategy: back up all rows in memory, clear the table's page
        references, apply the schema change through the catalog, then
        re-insert the backed-up rows mapped onto the new column layout
        (new columns become NULL, dropped columns are discarded).

        :param plan: node with 'table_name' and an 'action' dict whose 'type'
            selects ADD_COLUMN / DROP_COLUMN / ALTER_COLUMN / ADD_PRIMARY_KEY.
        :raises RuntimeError: when backup fails, the table does not exist, or
            a primary-key precondition (no NULLs / no duplicates) is violated.
        :raises ValueError: for an unsupported action type.
        """
        table_name = plan['table_name']
        action = plan['action']
        sub_op = action['type']

        # --- Step 1: back up existing data before touching anything. ---
        print("--- [Executor] 正在备份现有数据...")
        try:
            # list() materializes the scan iterator fully into memory.
            backup_data = list(self.storage_engine.scan(table_name))
            print(f"--- [Executor] 成功备份 {len(backup_data)} 条数据。")
        except Exception as e:
            # Abort the whole ALTER if the current rows cannot be read.
            raise RuntimeError(f"备份表 '{table_name}' 数据时出错: {e}")

        # Keep the old schema to map rows onto the new layout in step 4.
        old_schema = self.catalog_manager.get_table_schema(table_name)
        if not old_schema:
            raise RuntimeError(f"表 '{table_name}' 不存在")
        old_column_names = old_schema.column_order.copy()

        if sub_op == 'ADD_PRIMARY_KEY':
            col_name = action['column_name']
            try:
                # Validate against the OLD schema: a PK column must be free of
                # NULLs and duplicates before the change is committed.
                col_index = old_schema.column_order.index(col_name)
                seen_values = set()
                for row in backup_data:
                    value = row[col_index]
                    if value is None:
                        raise ValueError(f"无法添加主键: 列 '{col_name}' 包含 NULL 值。")
                    if value in seen_values:
                        raise ValueError(f"无法添加主键: 列 '{col_name}' 包含重复值 '{value}'。")
                    seen_values.add(value)
            except ValueError as e:
                 # Catches both a missing column (.index) and the NULL /
                 # duplicate violations raised above.
                 raise RuntimeError(f"为表 '{table_name}' 添加主键时失败: {e}")

        # --- Step 2: drop the table's physical page references. ---
        # This prepares the table for the data migration in step 4.
        if old_schema.pages:
            old_schema.pages = []

        # --- Step 3: apply the metadata (schema) change via the catalog. ---
        if sub_op == 'ADD_COLUMN':
            col_def = action['column_def']
            col_name = col_def['column_name']['name']
            data_type = col_def['data_type']['name']
            self.catalog_manager.add_column(table_name, col_name, data_type)
            print(f"执行结果: 表 '{table_name}' 的结构已成功添加列 '{col_name}'.")
        elif sub_op == 'DROP_COLUMN':
            col_name = action['column_name']
            self.catalog_manager.drop_column(table_name, col_name)
            print(f"执行结果: 表 '{table_name}' 已成功删除列 '{col_name}'.")
        elif sub_op == 'ALTER_COLUMN':
            col_name = action['column_name']
            new_data_type = action['new_data_type']['name']
            self.catalog_manager.alter_column_type(table_name, col_name, new_data_type)
            print(f"执行结果: 表 '{table_name}' 的列 '{col_name}' 类型已修改为 '{new_data_type}'.")
        elif sub_op == 'ADD_PRIMARY_KEY':
            col_name = action['column_name']
            self.catalog_manager.add_primary_key(table_name, col_name)
            print(f"执行结果: 已为表 '{table_name}' 的列 '{col_name}' 添加主键.")
        else:
            raise ValueError(f"不支持的 ALTER TABLE 操作: {sub_op}")

        # --- Step 4: restore and migrate the backed-up data. ---
        if backup_data:
            print(f"--- [Executor] 正在恢复并迁移 {len(backup_data)} 条数据...")

            # Fetch the schema again — it now reflects the change from step 3.
            new_schema = self.catalog_manager.get_table_schema(table_name)
            new_column_names = new_schema.column_order

            for old_row_tuple in backup_data:
                # Pair old values with their column names for easy lookup.
                old_row_map = dict(zip(old_column_names, old_row_tuple))

                # Build the new row following the NEW schema's column order.
                new_row = []
                for new_col_name in new_column_names:
                    # Reuse the old value when the column survived; otherwise
                    # the new column gets None (SQL NULL).
                    value = old_row_map.get(new_col_name, None)
                    new_row.append(value)

                # Insert the rebuilt row back into the table.
                self.storage_engine.insert_tuple(table_name, tuple(new_row))

            print("--- [Executor] 数据迁移完成。")

        # --- Step 5: persist the metadata change. ---
        self.catalog_manager.save_catalog()
        return []

    def _execute_create_table(self, plan: dict):
        """Create a table from the plan and persist the catalog change."""
        table_name = plan['table_name']
        # Table-level constraints (PRIMARY KEY, UNIQUE, ...) default to none.
        table_constraints = plan.get('constraints', [])
        self.catalog_manager.create_table(table_name, plan['columns'], table_constraints)
        self.catalog_manager.save_catalog()
        print(f"执行结果: 表 '{table_name}' 创建成功。")
        return []

    def _execute_create_index(self, plan: dict):
        """Create index metadata, then build the index by scanning the table.

        Every (key value -> list of RIDs) pair is recorded in the index's
        data structure obtained from the catalog.

        :raises RuntimeError: if the indexed column does not exist.
        """
        index_name = plan['index_name']
        table_name = plan['table_name']
        column_name = plan['column_name']

        self.catalog_manager.create_index(index_name, table_name, column_name)
        print(f"执行结果: 索引元数据 '{index_name}' 创建成功。")

        print(f"正在为表'{table_name}'的'{column_name}'列构建索引'{index_name}'...")
        table_schema = self.catalog_manager.get_table_schema(table_name)
        index_data = self.catalog_manager.get_index_data_structure(index_name)

        try:
            key_position = table_schema.column_order.index(column_name)
        except ValueError:
            raise RuntimeError(f"列 '{column_name}' 在表 '{table_name}' 中不存在。")

        # Populate the index: map each key value to the RIDs that hold it.
        processed = 0
        for rid, row in self.storage_engine.scan_with_rid(table_name):
            index_data.setdefault(row[key_position], []).append(rid)
            processed += 1

        print(f"索引构建完成，共处理 {processed} 行数据。")
        return []

    def _execute_truncate_table(self, plan: dict):
        """Execute TRUNCATE TABLE by dropping all of the table's page references.

        The storage layer cannot be modified here, so pages are not physically
        reclaimed; clearing the schema's page list is what makes subsequent
        scans see an empty table.

        :raises RuntimeError: if the table does not exist.
        """
        table_name = plan['table_name']

        schema = self.catalog_manager.get_table_schema(table_name)
        if not schema:
            raise RuntimeError(f"表 '{table_name}' 不存在")

        # Fix: removed a leftover `for page_id in schema.pages: pass` loop —
        # a dead placeholder for a page-wipe that the storage API does not
        # expose. Resetting the page list is the effective truncation.
        schema.pages = []
        self.catalog_manager.save_catalog()

        print(f"执行结果: 表 '{table_name}' 已清空")
        return []

    def _execute_drop_table(self, plan: dict):
        """Execute DROP TABLE by removing the table's catalog entry.

        :raises RuntimeError: if the table does not exist.
        """
        table_name = plan['table_name']

        # Guard clause: fail fast when the table is unknown.
        tables = self.catalog_manager._catalog.tables
        if table_name not in tables:
            raise RuntimeError(f"表 '{table_name}' 不存在")

        del tables[table_name]
        self.catalog_manager.save_catalog()
        print(f"执行结果: 表 '{table_name}' 已删除")
        return []

    def _execute_insert(self, plan: dict):
        """
        Execute a single-row INSERT.

        Builds the complete row in schema column order (generating
        auto-increment values and NULL-filling omitted columns), validates
        all declared constraints, then hands the tuple to storage.

        :raises ValueError: on any NOT NULL / UNIQUE / FOREIGN KEY / CHECK
            constraint violation.
        """
        table_name = plan['table_name']
        schema = self.catalog_manager.get_table_schema(table_name)
        provided_values = [val['value'] for val in plan['values']]
        provided_columns = plan.get('columns', [])

        # 1. Build the complete row to insert (final_values).
        final_values = []
        if provided_columns:
            # Explicit column list: align provided values by column name.
            provided_data = dict(zip(provided_columns, provided_values))
            for col_name in schema.column_order:
                if col_name in provided_data:
                    final_values.append(provided_data[col_name])
                elif col_name == schema.auto_increment_column:
                    # Generate the next auto-increment value (mutates schema state).
                    final_values.append(schema.next_auto_increment_value)
                    schema.next_auto_increment_value += 1
                else:
                    final_values.append(None)
        else:
            # No column list: consume values positionally, skipping the
            # auto-increment column which is generated here.
            val_idx = 0
            for col_name in schema.column_order:
                if col_name == schema.auto_increment_column:
                    final_values.append(schema.next_auto_increment_value)
                    schema.next_auto_increment_value += 1
                else:
                    if val_idx < len(provided_values):
                        final_values.append(provided_values[val_idx])
                        val_idx += 1
                    else:
                        final_values.append(None)

        # --- 2. Constraint checks ---

        # NOT NULL constraints
        if schema.not_null_columns:
            for col_name in schema.not_null_columns:
                col_index = schema.column_order.index(col_name)
                if final_values[col_index] is None:
                    raise ValueError(f"NOT NULL constraint failed for column '{col_name}'.")

        # UNIQUE constraints (requires a full scan of the existing rows)
        if schema.unique_constraints:

            existing_data = list(self.storage_engine.scan(table_name))

            for constraint in schema.unique_constraints:
                # A constraint is either single-column (str) or multi-column (tuple).
                if isinstance(constraint, str): # single-column constraint
                    col_index = schema.column_order.index(constraint)
                    value_to_check = final_values[col_index]
                    # SQL semantics: NULLs never violate UNIQUE.
                    if value_to_check is None: continue
                    for row in existing_data:
                        if row[col_index] == value_to_check:
                            raise ValueError(f"UNIQUE constraint failed for column '{constraint}'. Value '{value_to_check}' already exists.")

                elif isinstance(constraint, tuple): # multi-column constraint
                    col_indices = [schema.column_order.index(col) for col in constraint]
                    values_to_check = tuple(final_values[i] for i in col_indices)
                    if None in values_to_check: continue
                    for row in existing_data:
                        if tuple(row[i] for i in col_indices) == values_to_check:
                            raise ValueError(f"UNIQUE constraint failed for columns {constraint}. The combination of values {values_to_check} already exists.")


        # FOREIGN KEY constraints
        if schema.foreign_keys:
            for fk in schema.foreign_keys:
                local_col = fk.column_name['name']
                col_index = schema.column_order.index(local_col)
                value_to_check = final_values[col_index]
                if value_to_check is None: continue

                ref_table = fk.ref_table['name']
                ref_col = fk.ref_column['name']
                ref_schema = self.catalog_manager.get_table_schema(ref_table)
                ref_col_index = ref_schema.column_order.index(ref_col)

                # The value must exist in the referenced (parent) table.
                found = any(parent_row[ref_col_index] == value_to_check for parent_row in self.storage_engine.scan(ref_table))
                if not found:
                    raise ValueError(f"FOREIGN KEY constraint failed: value '{value_to_check}' not found in {ref_table}({ref_col}).")

        # CHECK constraints
        if schema.check_constraints:
            for check in schema.check_constraints:
                if not self._eval_predicate(final_values, check, schema):
                    raise ValueError(f"CHECK constraint failed for table '{table_name}'.")

        # --- 3. All constraints passed: perform the actual insert. ---
        self.storage_engine.insert_tuple(table_name, tuple(final_values))
        print(f"执行结果: 1 行数据已插入到 '{table_name}'。")
        return []

    def _execute_project(self, plan: dict) -> list:
        """
        Execute PROJECT (the SELECT column list).

        Resolves each requested column (qualified, unqualified or aliased)
        against the source's qualified schema and returns rows restricted to
        those columns. SELECT * passes the source rows through unchanged.

        :raises RuntimeError: when the source schema cannot be determined,
            an unqualified column is ambiguous, or a column does not exist.
        """
        source_data = self.execute(plan['source'])

        if not source_data:
            return []

        # Always fetch the table-qualified schema from the source so columns
        # resolve correctly, especially after a JOIN.
        source_schema = self._get_schema_from_plan(plan['source'], qualified=True)
        if not source_schema:
            raise RuntimeError("无法为投影操作确定数据源的结构。")

        # SELECT *: no projection needed, return the rows as-is.
        if plan['columns'] and plan['columns'][0]['type'] == 'star':
            return source_data

        # Names of the columns to select, e.g. ['name', 'age'] or
        # ['students.name', 'courses.course_name'].
        projected_col_names = []
        for col in plan['columns']:
            if col.get('type') == 'alias':
                projected_col_names.append(col['alias']['name'])
            elif col.get('type') == 'identifier':
                projected_col_names.append(col['name'])

        col_indices = []
        # Map every projected column name onto an index in the source schema.
        for col_name in projected_col_names:
            try:
                # Plan A: direct match. Works for qualified names
                # ('students.name') and aggregate aliases ('student_count').
                index = source_schema.index(col_name)
                col_indices.append(index)
                continue
            except ValueError:
                # Plan B: direct match failed and the name is unqualified
                # (contains no '.') — likely a simple single-table SELECT.
                if '.' not in col_name:
                    # Search for a unique schema column whose last path
                    # segment equals the requested name.
                    possible_matches = [s_col for s_col in source_schema if s_col.split('.')[-1] == col_name]

                    if len(possible_matches) == 1:
                        # Exactly one unambiguous match.
                        index = source_schema.index(possible_matches[0])
                        col_indices.append(index)
                    elif len(possible_matches) > 1:
                        # Ambiguous reference (e.g. SELECT id FROM t1 JOIN t2).
                        raise RuntimeError(f"列引用 '{col_name}' 存在歧义。可能的匹配项: {possible_matches}")
                    else:
                        # No match at all.
                        raise RuntimeError(f"投影列错误: '{col_name}' 不是一个有效的列。可用的列: {source_schema}")
                else:
                    # The name was qualified but still not found.
                    raise RuntimeError(f"投影列错误: '{col_name}' 不是一个有效的列。可用的列: {source_schema}")

        # Build the final result rows using the resolved indices.
        projected_results = []
        for row in source_data:
            projected_row = [row[i] for i in col_indices]
            projected_results.append(projected_row)

        return projected_results

    def _execute_aggregate(self, plan: dict):
        """
        Execute AGGREGATE (GROUP BY + aggregate functions).

        Groups the source rows by the GROUP BY columns — or treats the whole
        input as a single group when there is no GROUP BY — and evaluates
        each output expression per group. Only COUNT(*) / COUNT(column) is
        implemented so far.

        :raises RuntimeError: when the source schema or a referenced column
            cannot be resolved.
        :raises NotImplementedError: for aggregate functions other than COUNT.
        """
        source_data = self.execute(plan['source'])
        group_by_cols = [gb['name'] for gb in plan.get('group_by', [])]
        aggregates = plan.get('aggregates', [])

        # Resolve the source schema from the plan tree. get_table_schema is
        # not enough here: the source may be a JOIN or another operator.
        source_schema = self._get_schema_from_plan(plan['source'], qualified=False)
        if not source_schema:
            raise RuntimeError("无法确定聚合操作的数据源结构。")

        # --- 1. Grouping phase ---
        if not group_by_cols:
            # No GROUP BY (e.g. SELECT COUNT(*) FROM students): one big group.
            grouped_data = {'all_rows': source_data}
        else:
            # GROUP BY: bucket rows by their key tuple.
            grouped_data = collections.defaultdict(list)
            try:
                group_by_indices = [source_schema.index(col) for col in group_by_cols]
            except ValueError as e:
                raise RuntimeError(f"GROUP BY 子句中的列 '{e.args[0]}' 不存在于源数据中。")

            for row in source_data:
                key = tuple(row[i] for i in group_by_indices)
                grouped_data[key].append(row)

        # --- 2. Aggregation phase ---
        results = []
        for key, group_rows in grouped_data.items():
            if not group_rows: continue

            result_row = []
            # Map group-by column names to this group's key values.
            group_key_map = dict(zip(group_by_cols, key)) if group_by_cols else {}

            for agg_plan in aggregates:
                # Case A: a plain GROUP BY column — read it from the key map.
                if agg_plan.get('type') == 'identifier':
                    result_row.append(group_key_map[agg_plan['name']])
                    continue

                # Case B: an aggregate function (possibly wrapped in an alias).
                agg_func_plan = agg_plan
                if agg_plan.get('type') == 'alias':
                    agg_func_plan = agg_plan.get('expression', {})

                if agg_func_plan.get('type') == 'function_call':
                    func_name = agg_func_plan.get('func_name', '').upper()
                    arg = agg_func_plan['args'][0] if agg_func_plan.get('args') else None

                    if func_name == 'COUNT':
                        if arg and arg.get('type') == 'star':
                            # COUNT(*): every row in the group counts.
                            result_row.append(len(group_rows))
                        else:
                            # COUNT(column): count only non-NULL values.
                            try:
                                col_index = source_schema.index(arg['name'])
                                count = sum(1 for row in group_rows if row[col_index] is not None)
                                result_row.append(count)
                            except (ValueError, IndexError):
                                raise RuntimeError(f"COUNT函数中的列 '{arg['name']}' 不存在。")
                    else:
                        # SUM, AVG, MAX, MIN etc. would be added here.
                        raise NotImplementedError(f"聚合函数 '{func_name}' 尚未实现。")
            results.append(result_row)

        return results

    def _execute_distinct(self, plan: dict) -> list:
        """Drop duplicate rows, keeping the first occurrence of each (stable)."""
        rows = self.execute(plan['source'])
        seen = set()
        deduplicated = []
        for row in rows:
            fingerprint = tuple(row)  # rows may be lists; tuples are hashable
            if fingerprint in seen:
                continue
            seen.add(fingerprint)
            deduplicated.append(row)
        return deduplicated

    def _execute_sort(self, plan: dict) -> list:
        """Execute SORT (ORDER BY): sort the child rows by one key column.

        Numeric-looking values are compared numerically; everything else is
        compared as-is (typically lexicographically for strings).

        :raises RuntimeError: if the source schema cannot be determined or
            the sort column is not part of it.
        """
        # 1. Execute the child plan to obtain the rows to sort.
        source_data = self.execute(plan['source'])

        sort_key, order = plan['sort_key'], plan['order'].upper()

        source_schema = self._get_schema_from_plan(plan['source'])
        if not source_schema:
            raise RuntimeError(f"无法确定排序操作的数据源结构。")

        try:
            # Locate the sort column inside the source's schema.
            col_index = source_schema.index(sort_key)
        except ValueError:
            raise RuntimeError(f"排序列 '{sort_key}' 不在输入数据源的列中: {source_schema}。")

        def sort_key_func(row):
            val = row[col_index]
            # Bug fix: the old code only used float() for strings containing
            # '.', and int(val) otherwise — which truncated real float values
            # (2.9 -> 2), making the order of close floats input-dependent.
            # float() preserves numeric ordering for ints, floats and
            # numeric strings alike.
            try:
                return float(val)
            except (ValueError, TypeError):
                # Non-numeric values compare as-is (usually strings).
                return val

        reverse = (order == 'DESC')
        return sorted(source_data, key=sort_key_func, reverse=reverse)

    def _execute_join(self, plan: dict) -> list:
        """
        Execute JOIN as a hash join (INNER / LEFT / RIGHT).

        NOTE: currently unused — execute() dispatches JOIN to
        _execute_block_nested_loop_join; kept as an alternative implementation.

        :raises RuntimeError: if the join condition is not a supported
            equi-join or a referenced column is missing.
        """
        left_data = self.execute(plan['left_source'])
        right_data = self.execute(plan['right_source'])
        join_type = plan['join_type']
        condition = plan['condition']

        # Extract the (left_col, right_col) pair of the equi-join condition.
        left_col, right_col = self._parse_join_condition(condition)

        # Unqualified schemas of both sides, used for column-index lookup.
        left_schema = self._get_schema_from_plan(plan['left_source'], qualified=False)
        right_schema = self._get_schema_from_plan(plan['right_source'], qualified=False)

        try:
            left_col_idx = left_schema.index(left_col)
            right_col_idx = right_schema.index(right_col)
        except ValueError:
            raise RuntimeError(f"JOIN条件中的列不存在: {left_col} 或 {right_col}")

        result = []
        # Build a hash table over the right side keyed on the join column.
        right_hash = collections.defaultdict(list)
        for right_row in right_data:
            key = right_row[right_col_idx]
            right_hash[key].append(right_row)

        # --- INNER and LEFT JOIN logic ---
        for left_row in left_data:
            key = left_row[left_col_idx]
            matched = False
            if key in right_hash:
                matched = True
                for right_row in right_hash[key]:
                    result.append(left_row + right_row)

            if not matched and join_type == 'LEFT':
                # Pad the right side with NULLs; must be a tuple (not a list)
                # so it concatenates with the left row tuple.
                result.append(left_row + tuple([None] * len(right_schema)))

        # --- RIGHT JOIN logic ---
        if join_type == 'RIGHT':
            # Collect the right-side keys that matched at least one left row.
            matched_right_keys = set()
            for left_row in left_data:
                key = left_row[left_col_idx]
                if key in right_hash:
                    matched_right_keys.add(key)

            # Emit unmatched right rows padded with NULLs on the left.
            for right_row in right_data:
                key = right_row[right_col_idx]
                if key not in matched_right_keys:
                    # Same tuple() requirement as above for concatenation.
                    result.append(tuple([None] * len(left_schema)) + right_row)

        return result

    def _parse_join_condition(self, condition: dict) -> tuple:
        """解析JOIN条件，返回(左表列名, 右表列名)"""
        if condition['op'] != '=':
            raise RuntimeError("只支持等值JOIN条件")

        left = condition['left']
        right = condition['right']

        if left['type'] != 'identifier' or right['type'] != 'identifier':
            raise RuntimeError("JOIN条件必须为列名")

        left_col = left['name'].split('.')[-1]
        right_col = right['name'].split('.')[-1]

        return left_col, right_col

    def _get_schema_from_plan(self, plan: dict, qualified=True) -> list:
        """
        从执行计划中递归获取schema。
        :param plan: 执行计划节点
        :param qualified: 是否返回带表名限定的列名
        :return: 列名列表
        """
        op = plan.get('operation')

        if op == 'SCAN':
            table_name = plan['table_name']
            schema = self.catalog_manager.get_table_schema(table_name)
            simple_schema = schema.column_order if schema else []
            if qualified:
                return [f"{table_name}.{col}" for col in simple_schema]
            return simple_schema

        elif op == 'AGGREGATE':
            # 聚合操作的schema由其output_schema定义
            return plan.get('output_schema', [])
        elif op == 'JOIN':
            left_schema = self._get_schema_from_plan(plan['left_source'], qualified=qualified)
            right_schema = self._get_schema_from_plan(plan['right_source'], qualified=qualified)
            # JOIN的结果总是返回限定名，以避免歧义
            return left_schema + right_schema

        elif op == 'PROJECT':
            # PROJECT的schema由其选择的列决定
            if plan['columns'][0]['type'] == 'star':
                # 如果是SELECT *，schema就是源的schema
                return self._get_schema_from_plan(plan['source'], qualified)
            else:
                # 否则，schema是选择的列名或别名
                schema = []
                for col in plan['columns']:
                    if col.get('type') == 'alias':
                        schema.append(col['alias']['name'])
                    elif col.get('type') == 'identifier':
                        # 如果需要非限定名，则进行切分
                        col_name = col['name']
                        if not qualified and '.' in col_name:
                            schema.append(col_name.split('.')[-1])
                        else:
                            schema.append(col_name)
                return schema

        elif 'source' in plan:
            # 对于FILTER, SORT等操作，schema与其源相同
            return self._get_schema_from_plan(plan['source'], qualified)

        return []

    def _get_table_name_from_plan(self, plan: dict) -> str:
        if 'table_name' in plan:
            return plan['table_name']
        if 'source' in plan:
            return self._get_table_name_from_plan(plan['source'])
        # 对于JOIN，尝试从左侧获取
        if 'left_source' in plan:
            return self._get_table_name_from_plan(plan['left_source'])
        return None

    def _execute_scan(self, plan: dict, predicate: dict = None) -> list:
        """Execute SCAN, optionally applying a pushed-down filter predicate.

        :param plan: SCAN plan node (carries 'table_name').
        :param predicate: optional condition pushed down from a FILTER node;
            when given, rows are filtered while scanning.
        :return: list of (matching) tuples.
        """
        table_name = plan['table_name']

        # No pushed-down condition: plain full table scan.
        if not predicate:
            return list(self.storage_engine.scan(table_name))

        # With a predicate we need the schema to resolve column references.
        schema = self.catalog_manager.get_table_schema(table_name)
        if not schema:
            return []

        # Stream rows from storage and keep those satisfying the predicate.
        return [
            row
            for row in self.storage_engine.scan(table_name)
            if self._eval_predicate(row, predicate, schema)
        ]

    def _execute_block_nested_loop_join(self, plan: dict, block_size=1000) -> list:
        """
        Execute JOIN as a hash join processed over block-sized chunks of the
        outer side. Supports INNER, LEFT and RIGHT joins.

        For INNER joins the smaller input is chosen as the outer side; for
        LEFT/RIGHT joins the preserved side is pinned as the outer so its
        unmatched rows can be NULL-padded.

        :param plan: JOIN plan node with left/right sources and a condition.
        :param block_size: number of outer rows processed per chunk.
        :raises RuntimeError: if a join-condition column cannot be resolved.
        """
        left_data_source = self.execute(plan['left_source'])
        right_data_source = self.execute(plan['right_source'])
        condition = plan['condition']
        join_type = plan.get('join_type', 'INNER') # default to INNER JOIN

        left_col, right_col = self._parse_join_condition(condition)
        left_schema = self._get_schema_from_plan(plan['left_source'], qualified=False)
        right_schema = self._get_schema_from_plan(plan['right_source'], qualified=False)

        try:
            left_col_idx = left_schema.index(left_col)
            right_col_idx = right_schema.index(right_col)
        except ValueError:
            raise RuntimeError(f"JOIN条件中的列不存在: {left_col} 或 {right_col}")

        # For a LEFT JOIN the left table must be the outer (preserved) side.
        if join_type == 'LEFT':
            outer_table, inner_table = left_data_source, right_data_source
            outer_col_idx, inner_col_idx = left_col_idx, right_col_idx
            swapped = False
        # For a RIGHT JOIN the right table must be the outer side.
        elif join_type == 'RIGHT':
            outer_table, inner_table = right_data_source, left_data_source
            outer_col_idx, inner_col_idx = right_col_idx, left_col_idx
            swapped = True
        else: # INNER JOIN heuristic: use the smaller input as the outer side
            if len(left_data_source) <= len(right_data_source):
                outer_table, inner_table = left_data_source, right_data_source
                outer_col_idx, inner_col_idx = left_col_idx, right_col_idx
                swapped = False
            else:
                outer_table, inner_table = right_data_source, left_data_source
                outer_col_idx, inner_col_idx = right_col_idx, left_col_idx
                swapped = True


        result = []
        # Hash the entire inner side once, keyed on its join column.
        inner_hash = collections.defaultdict(list)
        for inner_row in inner_table:
            key = inner_row[inner_col_idx]
            inner_hash[key].append(inner_row)

        # Process the outer side one block at a time.
        for i in range(0, len(outer_table), block_size):
            block = outer_table[i:i + block_size]

            for outer_row in block:
                key = outer_row[outer_col_idx]
                matched = False # tracks whether this outer row found a partner
                if key in inner_hash:
                    matched = True
                    for inner_row in inner_hash[key]:
                        # 'swapped' keeps output columns in left+right order.
                        if not swapped:
                            result.append(outer_row + inner_row)
                        else:
                            result.append(inner_row + outer_row)

                # LEFT JOIN with the left table as outer and no match found:
                # pad the right side with NULLs.
                if not matched and join_type == 'LEFT' and not swapped:
                    result.append(outer_row + tuple([None] * len(right_schema)))
                # RIGHT JOIN with the right table as outer and no match found:
                # pad the left side with NULLs.
                elif not matched and join_type == 'RIGHT' and swapped:
                    result.append(tuple([None] * len(left_schema)) + outer_row)


        return result

    # In executor.py
    def _execute_index_scan(self, plan: dict) -> list:
        """Fetch tuples through a B+ tree index instead of a full table scan.

        Resolves the index root page from the catalog, searches the B+ tree
        for the requested key, then materializes each matching tuple by RID.
        """
        search_key = plan['key']
        # The plan only names the index; the owning table is resolved separately.
        table_name = self._get_table_name_from_plan(plan)

        root_page_id = self.catalog_manager.get_index_root_page_id(plan['index_name'])
        # B+ tree keys are stored as strings, so normalize the key before searching.
        matching_rids = self.storage_engine.btree_search(root_page_id, str(search_key))

        if not matching_rids:
            return []
        return [self.storage_engine.get_tuple(table_name, rid) for rid in matching_rids]

    def _execute_delete(self, plan: dict):
        """Execute a DELETE statement.

        Collects the RIDs of all rows matching the WHERE predicate first, then
        deletes them in a second pass so the scan never iterates a table that
        is being mutated underneath it.
        """
        table_name = plan['table_name']
        predicate = plan.get('predicate')
        if not predicate:
            # Guard against accidental full-table deletes.
            print("安全起见，不支持无WHERE条件的DELETE。")
            return []
        # Fix: the schema is loop-invariant — fetch it once, not once per row.
        schema = self.catalog_manager.get_table_schema(table_name)
        rids_to_delete = []
        for rid, tuple_data in self.storage_engine.scan_with_rid(table_name):
            if self._eval_predicate(tuple_data, predicate, schema):
                rids_to_delete.append(rid)
        for rid in rids_to_delete:
            self.storage_engine.delete_tuple(table_name, rid)
        print(f"执行结果: 从 '{table_name}' 中删除了 {len(rids_to_delete)} 行数据。")
        return []

    def _execute_update(self, plan: dict):
        """Execute an UPDATE statement, including SET expressions.

        UPDATE ... FROM plans are delegated to `_execute_update_from`; this
        method handles the single-table form only.
        """
        table_name = plan['table_name']

        # UPDATE ... FROM needs join semantics and lives in its own method.
        if 'from_table' in plan:
            return self._execute_update_from(plan)

        predicate = plan.get('predicate')
        assignments = plan.get('assignments', [])
        schema = self.catalog_manager.get_table_schema(table_name)
        if not schema:
            raise RuntimeError(f"表 '{table_name}' 不存在。")

        # Phase 1: evaluate every assignment against the *original* row values
        # and remember the resulting tuples, keyed by RID.
        pending = []
        for rid, old_row in self.storage_engine.scan_with_rid(table_name):
            if predicate and not self._eval_predicate(old_row, predicate, schema):
                continue
            updated_row = list(old_row)
            for assign in assignments:
                col_name = assign['column']['name']
                new_value = self._eval_expression(assign['value'], old_row, schema)
                try:
                    updated_row[schema.column_order.index(col_name)] = new_value
                except ValueError:
                    raise RuntimeError(f"列 '{col_name}' 在表 '{table_name}' 中不存在。")
            pending.append({'rid': rid, 'new_tuple': updated_row})

        # Phase 2: apply all updates only after the scan has finished.
        for update in pending:
            self.storage_engine.update_tuple(table_name, update['rid'], update['new_tuple'])
        print(f"执行结果: 在 '{table_name}' 中更新了 {len(pending)} 行数据。")
        return []

    def _execute_update_from(self, plan: dict):
        """
        Execute an UPDATE ... FROM statement (fully rewritten version).
        This version can evaluate WHERE clauses made up of multiple conditions.
        """
        main_table_name = plan['table_name']
        from_table_name = plan['from_table']['name']
        predicate = plan.get('predicate')
        assignments = plan.get('assignments', [])

        main_schema = self.catalog_manager.get_table_schema(main_table_name)
        from_schema = self.catalog_manager.get_table_schema(from_table_name)
        if not main_schema or not from_schema:
            raise RuntimeError(f"表 '{main_table_name}' 或 '{from_table_name}' 不存在。")

        # 1. Build a temporary "joined" schema containing every column of both
        #    tables, qualified with its table name, e.g.:
        #    ['students.id', 'students.name', ..., 'grades.student_id', ...]
        joined_schema = [f"{main_table_name}.{col}" for col in main_schema.column_order] + \
                        [f"{from_table_name}.{col}" for col in from_schema.column_order]

        # 2. Materialize the FROM table in memory for repeated matching.
        #    NOTE(review): despite the original comment claiming a hash table,
        #    this is a plain list — the join below is a nested loop, O(n*m).
        from_data = list(self.storage_engine.scan(from_table_name))

        # 3. Walk the main table, performing the join, filter and update steps.
        updates_to_perform = []
        for main_rid, main_row in self.storage_engine.scan_with_rid(main_table_name):
            for from_row in from_data:
                # 3a. Concatenate the main row and the FROM row into one joined row.
                joined_row = main_row + from_row

                # 3b. Evaluate the complete WHERE clause against the joined row.
                if self._eval_predicate(joined_row, predicate, joined_schema):
                    # The condition holds: compute the new column values.
                    new_tuple = list(main_row)
                    for assign in assignments:
                        col_name = assign['column']['name']

                        # 3c. Evaluate the SET expression (e.g. age + 1) in the
                        # context of the joined row.
                        # --- first fix: pass the main table's schema when
                        # evaluating the SET-clause expression ---
                        new_value = self._eval_expression(assign['value'], joined_row, main_schema)
                        # --- end of fix ---

                        try:
                            # Locate the target column's index in the main schema.
                            col_index = main_schema.column_order.index(col_name)

                            # --- final fix: begin ---
                            # 1. Read the target column's declared type string
                            #    straight from the schema.
                            column_name_from_index = main_schema.column_order[col_index]
                            target_column_type = main_schema.columns[
                                column_name_from_index]  # yields 'INTEGER' or 'VARCHAR'
                            target_column_name = column_name_from_index

                            # 2. Coerce the computed value according to that type.
                            if target_column_type == 'INTEGER':
                                # Ensure the value is an int before writing it
                                # into an INTEGER column.
                                try:
                                    if isinstance(new_value, float) and new_value.is_integer():
                                        new_value = int(new_value)
                                    elif not isinstance(new_value, int):
                                        new_value = int(new_value)
                                except (ValueError, TypeError):
                                    raise TypeError(
                                        f"无法将值 '{new_value}' 赋给 INTEGER 类型的列 '{target_column_name}'")

                            elif target_column_type == 'VARCHAR':
                                # Ensure the value is a string before writing it
                                # into a VARCHAR column.
                                if not isinstance(new_value, str):
                                    new_value = str(new_value)
                            # --- final fix: end ---

                            new_tuple[col_index] = new_value
                        except ValueError:
                            raise RuntimeError(f"列 '{col_name}' 在表 '{main_table_name}' 中不存在。")

                    # Record the pending update.
                    updates_to_perform.append({'rid': main_rid, 'new_tuple': new_tuple})
                    # NOTE: stop after the first match so each main-table row is
                    # updated at most once.
                    break

        # 4. Apply all recorded updates after the scan completes.
        for update in updates_to_perform:
            self.storage_engine.update_tuple(main_table_name, update['rid'], update['new_tuple'])

        print(f"执行结果: 在 '{main_table_name}' 中更新了 {len(updates_to_perform)} 行数据。")
        return []

    def _execute_show_tables(self, plan: dict):
        """
        执行 SHOW TABLES 操作。
        从目录管理器获取所有表的名称。
        """
        table_names = self.catalog_manager.get_all_table_names()
        # 将表名列表 [ 'table1', 'table2' ] 转换为
        # 执行器标准的输出格式 [ ['table1'], ['table2'] ]
        return [[name] for name in table_names]

    def _apply_filter(self, rows: list, predicate: dict, table_name: str) -> list:
        """Keep only the rows of `table_name` that satisfy `predicate`.

        Returns an empty list when the table's schema cannot be resolved.
        """
        schema = self.catalog_manager.get_table_schema(table_name)
        if not schema:
            return []
        kept = []
        for candidate in rows:
            if self._eval_predicate(candidate, predicate, schema):
                kept.append(candidate)
        return kept

    def _apply_aggregate_filter(self, rows: list, predicate: dict, schema: list) -> list:
        """Filter aggregated rows (HAVING-style) against a plain column-name list."""
        if not schema:
            return []
        surviving = []
        for candidate in rows:
            if self._eval_predicate(candidate, predicate, schema):
                surviving.append(candidate)
        return surviving

    def _eval_predicate(self, row: list, predicate: dict, schema: any) -> bool:
        """Evaluate a WHERE-clause predicate tree against a single row.

        `schema` is either a TableSchema or a plain list of column names (the
        latter is used for aggregated / joined intermediate results).
        Raises RuntimeError when a referenced column does not exist.
        """
        op = predicate['op']

        # --- logical operators ---
        if op == 'AND':
            return self._eval_predicate(row, predicate['left'], schema) and self._eval_predicate(row,
                                                                                                 predicate['right'],
                                                                                                 schema)
        if op == 'OR':
            return self._eval_predicate(row, predicate['left'], schema) or self._eval_predicate(row, predicate['right'],
                                                                                                schema)
        if op == 'NOT':
            return not self._eval_predicate(row, predicate['operand'], schema)

        # --- IS [NOT] NULL ---
        if op == 'IS_NULL':
            left_node = predicate['left']
            col_name = left_node['name']
            column_order = schema.column_order if isinstance(schema, TableSchema) else schema
            try:
                col_index = column_order.index(col_name)
            except ValueError:
                raise RuntimeError(f"列 '{col_name}' 在表中不存在。")
            cell_value = row[col_index]
            is_null = cell_value is None
            # 'is_not' flips the sense for IS NOT NULL.
            return not is_null if predicate.get('is_not') else is_null

        # --- LIKE ---
        if op == 'LIKE':
            left_node = predicate['left']
            # The LIKE pattern node is stored under the 'pattern' key of the plan.
            pattern_node = predicate['pattern']
            col_name = left_node['name']

            column_order = schema.column_order if isinstance(schema, TableSchema) else schema
            try:
                col_index = column_order.index(col_name)
            except ValueError:
                raise RuntimeError(f"LIKE 子句中的列 '{col_name}' 在表中不存在。")

            cell_value_str = str(row[col_index])
            pattern_value = pattern_node['value']

            # Translate the SQL LIKE pattern into a regex.
            # Fix: escape regex metacharacters first so a literal '.', '+', '('
            # etc. in the pattern matches only itself. re.escape leaves '%' and
            # '_' untouched (Python 3.7+), so the wildcard substitution below
            # still finds them.
            regex_pattern = re.escape(pattern_value).replace('%', '.*').replace('_', '.')
            return re.match(f"^{regex_pattern}$", cell_value_str) is not None

        # --- IN (subquery or literal list) ---
        if op == 'IN':
            return self._eval_in_predicate(row, predicate, schema)

        # --- ordinary binary comparison operators ---
        left_node = predicate['left']
        right_node = predicate['right']
        col_name = left_node['name']

        column_order = schema.column_order if isinstance(schema, TableSchema) else schema
        table_name = schema.table_name if isinstance(schema, TableSchema) else "aggregated_result"

        try:
            col_index = column_order.index(col_name)
        except ValueError:
            raise RuntimeError(f"列 '{col_name}' 在表 '{table_name}' 中不存在。")

        cell_value_str = row[col_index]

        # The right-hand side is either another column or a literal value.
        right_value = None
        if right_node.get('type') == 'identifier':
            right_col_name = right_node['name']
            try:
                right_col_index = column_order.index(right_col_name)
                right_value = row[right_col_index]
            except ValueError:
                raise RuntimeError(f"列 '{right_col_name}' 在表中不存在。")
        else:
            right_value = right_node['value']

        # Compare numerically when both sides parse as numbers, otherwise
        # fall back to string comparison.
        try:
            cell_value_num = float(cell_value_str) if '.' in str(cell_value_str) else int(cell_value_str)
            right_value_num = float(right_value) if '.' in str(right_value) else int(right_value)
            cell_value, literal_value = cell_value_num, right_value_num
        except (ValueError, TypeError, AttributeError):
            cell_value, literal_value = str(cell_value_str), str(right_value)

        if op == '=': return cell_value == literal_value
        if op in ('!=', '<>'): return cell_value != literal_value
        if op == '>': return cell_value > literal_value
        if op == '<': return cell_value < literal_value
        if op == '>=': return cell_value >= literal_value
        if op == '<=': return cell_value <= literal_value

        return False

    def _eval_in_predicate(self, row: list, predicate: dict, schema: any) -> bool:
        """Evaluate an IN predicate (subquery or literal value list) for one row."""
        col_name = predicate['identifier']['name']

        # Resolve this row's value for the tested column.
        column_order = schema.column_order if isinstance(schema, TableSchema) else schema
        try:
            col_index = column_order.index(col_name)
        except ValueError:
            raise RuntimeError(f"列 '{col_name}' 在表中不存在。")

        cell_value = row[col_index]

        if 'subquery_plan' in predicate:
            # Run the subquery and test membership against its first column.
            subquery_rows = self.execute(predicate['subquery_plan'])
            return any(
                sub_row and len(sub_row) > 0 and sub_row[0] == cell_value
                for sub_row in subquery_rows
            )
        if 'values' in predicate:
            # Literal IN (v1, v2, ...) list.
            return cell_value in [entry['value'] for entry in predicate['values']]

        return False

    def _eval_expression(self, expr: dict, row: list, schema: any) -> any:
        """
        递归地计算AST中的表达式。
        支持: 字面量, 列标识符, 基本的二元算术运算, 以及字符串连接。
        """
        expr_type = expr.get('type')

        if expr_type == 'literal':
            # 如果是字面量（比如数字 1 或字符串 'Advanced '），直接返回值
            return expr['value']

        if expr_type == 'identifier':
            # 如果是列名（比如 course_name），从当前行数据中找到对应的值
            col_name = expr['name']
            # --- 修正开始：处理全限定名 (例如 'students.age') ---
            unqualified_col_name = col_name.split('.')[-1]
            # --- 修正结束 ---
            column_order = schema.column_order if hasattr(schema, 'column_order') else schema
            try:
                # 优先尝试用全限定名匹配 (例如 'courses.student_id')
                col_index = column_order.index(col_name)
                return row[col_index]
            except ValueError:
                # 如果失败，再尝试用非限定名匹配 (例如 'age')
                # 这对于处理 UPDATE ... FROM ... WHERE a.id = b.id AND age > 21 这种情况至关重要
                try:
                    col_index = column_order.index(unqualified_col_name)
                    return row[col_index]
                except ValueError:
                     raise RuntimeError(f"表达式求值错误: 列 '{col_name}' 在可用列中不存在: {column_order}。")


        if expr_type == 'binary_op':
            # 如果是二元运算
            op = expr['op']
            # 递归计算左边和右边的值
            left_val = self._eval_expression(expr['left'], row, schema)
            right_val = self._eval_expression(expr['right'], row, schema)

            # --- 核心修改开始 ---
            # 新增：专门处理字符串连接操作符 ||
            if op == '||':
                # 将两边的值都转换为字符串然后拼接
                return str(left_val) + str(right_val)
            # --- 核心修改结束 ---

            # （以下是处理算术运算的原始逻辑）
            try:
                left_num = float(left_val)
                right_num = float(right_val)
            except (ValueError, TypeError):
                # 提供了更详细的错误信息
                raise TypeError(f"无法对非数字类型的值进行算术运算: '{left_val}', '{right_val}' (操作符: {op})")

            result = 0.0
            if op == '+':
                result = left_num + right_num
            elif op == '-':
                result = left_num - right_num
            elif op == '*':
                result = left_num * right_num
            elif op == '/':
                if right_num == 0:
                    raise ZeroDivisionError("表达式求值错误: 除数为零。")
                result = left_num / right_num
            else:
                raise NotImplementedError(f"表达式求值错误: 不支持的二元运算符 '{op}'。")

            if result == int(result):
                return int(result)
            return result

        raise NotImplementedError(f"表达式求值错误: 不支持的表达式类型 '{expr_type}'。")

    def _execute_explain(self, plan: dict):
        """执行 EXPLAIN，格式化打印内部的查询计划"""
        inner_plan = plan['plan']

        def format_plan(p, indent=0):
            lines = []
            prefix = "  " * indent
            op = p.get('operation', 'Unknown')
            details = []
            for key, value in p.items():
                if key not in ['operation', 'source', 'left_source', 'right_source', 'plan']:
                    details.append(f"{key}={value}")

            lines.append(f"{prefix}-> {op}: {', '.join(details)}")

            if 'source' in p:
                lines.extend(format_plan(p['source'], indent + 1))
            if 'left_source' in p:
                lines.append(f"{prefix}  [LEFT]:")
                lines.extend(format_plan(p['left_source'], indent + 2))
            if 'right_source' in p:
                lines.append(f"{prefix}  [RIGHT]:")
                lines.extend(format_plan(p['right_source'], indent + 2))

            return lines

        formatted_lines = format_plan(inner_plan)
        print("--- Execution Plan ---")
        for line in formatted_lines:
            print(line)
        print("----------------------")

        # EXPLAIN 不返回数据行
        return []