// 文件路径: dbms/engine/ExecutionEngine.java
package com.dbms.engine;

import com.dbms.catalog.Catalog;
import com.dbms.cluster.ClusterManager;
import com.dbms.cluster.ShardingManager;
import com.dbms.common.*;
import com.dbms.common.SecurityException;
import com.dbms.execution.execution.*;
import com.dbms.execution.udf.FunctionRegistry;
import com.dbms.parser.ast.*;
import com.dbms.storage.BufferPoolManager;
import com.dbms.transaction.LogManager;
import com.dbms.transaction.Transaction;
import com.dbms.transaction.TransactionManager;

import java.io.IOException;
import java.util.*;
import java.util.stream.Collectors;

/**
 * Execution Engine.
 *
 * <p>One of the core database components: translates the abstract syntax tree
 * (AST) produced by the parser into an executable physical query plan. The
 * plan is a tree of {@code Executor} nodes, each responsible for a single
 * operation (scan, filter, join, aggregation, projection, limit, ...).
 *
 * <p>In cluster (sharding) mode, SELECT queries that cannot be routed to a
 * single shard are planned as a Scatter-Gather execution instead of a local
 * plan; see {@link #buildSelectPlan}.
 */
public class ExecutionEngine {

    private final Catalog catalog;
    private final BufferPoolManager bufferPoolManager;
    private final String dbFileName;
    private String currentUser;
    private final TransactionManager transactionManager;
    private final LogManager logManager;
    private boolean forceNestedLoopJoin = false;
    private final FunctionRegistry functionRegistry;
    private final boolean isClusterMode;

    /**
     * Creates an ExecutionEngine.
     *
     * @param catalog           catalog manager
     * @param bufferPoolManager buffer pool manager
     * @param dbFileName        database file name (used by BACKUP)
     * @param currentUser       initial user name
     * @param tm                transaction manager
     * @param lm                log manager
     * @param fr                UDF function registry
     * @param isClusterMode     whether the engine runs in cluster mode
     */
    public ExecutionEngine(Catalog catalog, BufferPoolManager bufferPoolManager, String dbFileName, String currentUser,
                           TransactionManager tm, LogManager lm, FunctionRegistry fr, boolean isClusterMode) {
        this.catalog = catalog;
        this.bufferPoolManager = bufferPoolManager;
        this.dbFileName = dbFileName;
        this.currentUser = currentUser;
        this.transactionManager = tm;
        this.logManager = lm;
        this.functionRegistry = fr;
        this.isClusterMode = isClusterMode;
    }

    /** Switches the user on whose behalf subsequent statements are planned. */
    public void setCurrentUser(String user) { this.currentUser = user; }

    /**
     * Forces the planner to use a nested-loop join instead of a hash join.
     *
     * @param force if true, NLJ is always chosen for JOIN queries
     */
    public void setForceNestedLoopJoin(boolean force) { this.forceNestedLoopJoin = force; }

    /**
     * Entry point for plan construction. Dispatches on the AST node type to
     * the specific builder method.
     *
     * @param statement  root AST node produced by the parser
     * @param isInternal whether this request was forwarded from another cluster node
     * @return root executor of the physical plan
     * @throws UnsupportedOperationException for unknown statement types
     */
    public Executor buildPlan(StatementNode statement, boolean isInternal) {
        switch (statement.getStatementType()) {
            case "CREATE_TABLE": return buildCreateTablePlan((CreateTableNode) statement);
            case "INSERT": return buildInsertPlan((InsertNode) statement);
            case "SELECT": return buildSelectPlan((SelectNode) statement, isInternal);
            case "DELETE": return buildDeletePlan((DeleteNode) statement);
            case "UPDATE": return buildUpdatePlan((UpdateNode) statement);
            case Constants.STATEMENT_BACKUP: return buildBackupPlan((BackupNode) statement);
            case Constants.STATEMENT_CREATE_USER: return buildCreateUserPlan((CreateUserNode) statement);
            case Constants.STATEMENT_DROP_USER: return buildDropUserPlan((DropUserNode) statement);
            case Constants.STATEMENT_GRANT: return buildGrantPlan((GrantNode) statement);
            case Constants.STATEMENT_REVOKE: return buildRevokePlan((RevokeNode) statement);
            case Constants.STATEMENT_EXPLAIN: return buildExplainPlan((ExplainNode) statement, isInternal);
            default: throw new UnsupportedOperationException("Unsupported statement type: " + statement.getStatementType());
        }
    }

    /**
     * Builds the plan for DELETE. Only primary-key equality deletes are
     * supported, mirroring the restriction enforced for UPDATE.
     */
    private Executor buildDeletePlan(DeleteNode node) {
        checkPrivilege(node.getTableName(), "DELETE");
        Catalog.TableMetadata tableInfo = catalog.getTable(node.getTableName());
        if (tableInfo == null) throw new RuntimeException("Table not found: " + node.getTableName());
        if (node.getWhereClause() == null) throw new UnsupportedOperationException("DELETE without WHERE clause is not supported.");
        ExpressionNode where = node.getWhereClause();
        // FIX: validate the predicate instead of blindly parsing it. The executor
        // deletes by primary key, so anything other than "pk = <value>" used to be
        // silently mis-executed (or crash with NumberFormatException).
        if (!"=".equals(where.getOperator())
                || !where.getLeftOperand().equalsIgnoreCase(tableInfo.schema.getColumn(0).getColumnName())) {
            throw new UnsupportedOperationException("DELETE only supports WHERE clause on the primary key with '='.");
        }
        // FIX: strip quotes like buildUpdatePlan does, so DELETE ... WHERE id = '5' also works.
        Integer keyToDelete = Integer.parseInt(where.getRightOperand().toString().replace("'", ""));
        Transaction currentTxn = transactionManager.getTransactionMap().values().stream().findFirst().orElse(null);
        // Catalog is passed so the executor can enforce foreign-key constraints.
        return new DeleteExecutor(bufferPoolManager, tableInfo, keyToDelete, currentTxn, logManager, catalog);
    }

    /**
     * Builds the plan for INSERT: converts the literal strings from the AST
     * into typed values according to the table schema, then wraps them in an
     * InsertExecutor.
     */
    private Executor buildInsertPlan(InsertNode node) {
        checkPrivilege(node.getTableName(), "INSERT");
        Catalog.TableMetadata tableInfo = catalog.getTable(node.getTableName());
        if (tableInfo == null) { throw new RuntimeException("Table not found: " + node.getTableName()); }
        Schema schema = tableInfo.schema;
        List<Object> values = new ArrayList<>();
        for (int i = 0; i < node.getValues().size(); i++) {
            Column col = schema.getColumn(i);
            // Literals arrive as raw strings; strip the surrounding quotes.
            String valStr = node.getValues().get(i).replace("'", "");
            if (col.getColumnType() == Type.INTEGER) {
                values.add(Integer.parseInt(valStr));
            } else {
                values.add(valStr);
            }
        }
        Tuple tuple = new Tuple(values, schema);
        Transaction currentTxn = transactionManager.getTransactionMap().values().stream().findFirst().orElse(null);
        // Catalog is passed so the executor can enforce foreign-key constraints.
        return new InsertExecutor(bufferPoolManager, tableInfo, List.of(tuple), currentTxn, logManager, catalog);
    }

    /**
     * Builds the plan for CREATE TABLE: converts the parser-side column
     * descriptions into catalog columns and forwards foreign-key definitions
     * to the executor.
     */
    private Executor buildCreateTablePlan(CreateTableNode node) {
        List<Column> columns = new ArrayList<>();
        for (com.dbms.catalogInParse.Column oldCol : node.getColumns()) {
            Type colType = oldCol.getType().equalsIgnoreCase("INT") ? Type.INTEGER : Type.VARCHAR;
            boolean isPrimaryKey = oldCol.isPrimaryKey();
            int varcharLength = oldCol.getLength();
            columns.add(new Column(oldCol.getName(), colType, varcharLength, isPrimaryKey));
        }
        Schema schema = new Schema(columns);
        // Foreign-key definitions are passed through so they are recorded in the catalog.
        return new CreateTableExecutor(catalog, node.getTableName(), schema, node.getForeignKeys());
    }

    /**
     * Builds the plan for SELECT — the most involved builder.
     *
     * <p>The plan is assembled bottom-up: data source (scan/join or
     * scatter-gather), then WHERE filter, GROUP BY aggregation, ORDER BY,
     * projection, and finally LIMIT.
     */
    private Executor buildSelectPlan(SelectNode node, boolean isInternal) {
        Catalog.TableMetadata tableInfo = catalog.getTable(node.getTableName());
        if (tableInfo == null) {
            throw new RuntimeException("Semantic Error: Table '" + node.getTableName() + "' not found.");
        }
        checkPrivilege(node.getTableName(), "SELECT");

        Executor plan;

        // --- Distributed vs. local plan decision point ---
        // A Scatter-Gather plan is built only when ALL of the following hold:
        // 1. The engine runs in cluster mode (isClusterMode)
        // 2. This is not an internally forwarded request (!isInternal)
        // 3. The cluster runs in data-sharding (SHARDING) mode
        // 4. The table itself is marked as sharded (isSharded)
        // 5. The query is not a JOIN
        // 6. No single sharding key can be extracted from the WHERE clause
        //    (meaning the query must be fanned out to every shard)
        if (isClusterMode && !isInternal &&
                ClusterManager.getInstance().getClusterMode() == ClusterManager.ClusterMode.SHARDING &&
                tableInfo.isSharded && node.getJoinNode() == null && ShardingManager.extractShardingKey(node) == null) {

            System.out.println("Planner: Detected a query requiring Scatter-Gather execution.");
            plan = new ScatterGatherExecutor(node, currentUser, catalog);
        } else {
            // --- Local execution plan ---
            // A. Base of the plan: the data source.
            if (node.getJoinNode() == null) {
                // Single-table query: the base is a scan.
                plan = buildSingleTableScan(node);
            } else {
                // JOIN query: the base is a join.
                plan = buildJoinPlan(node);
            }
            // B. Apply the WHERE filter on top of the base.
            //    (If the base is an index scan, the predicate is already applied
            //    inside the scan — no extra FilterExecutor is needed.)
            if (node.getWhereClause() != null && !(plan instanceof IndexScanExecutor)) {
                plan = applyFilter(plan, node.getWhereClause());
            }
        }

        // C. Apply GROUP BY aggregation.
        if (node.getGroupByNode() != null) {
            validateGroupBy(node.getColumns(), node.getGroupByNode().getColumns());
            plan = new AggregationExecutor(plan, node.getGroupByNode(), node.getColumns(), node.getHavingClause());
        }

        // D. Apply ORDER BY sorting.
        if (node.getOrderByNode() != null) {
            plan = new OrderByExecutor(plan, node.getOrderByNode());
        }

        // E. Apply projection (select the final output columns).
        //    (With GROUP BY, projection is already handled by AggregationExecutor.)
        if (node.getGroupByNode() == null) {
            plan = applyProjection(plan, node.getColumns());
        }

        // F. Apply LIMIT/OFFSET pagination.
        if (node.getLimitNode() != null) {
            plan = new LimitExecutor(plan, node.getLimitNode().getLimit(), node.getLimitNode().getOffset());
        }

        return plan;
    }

    /**
     * Validates GROUP BY semantics.
     * Rule: every non-aggregate column in the SELECT list must also appear in
     * the GROUP BY clause.
     */
    private void validateGroupBy(List<Object> selectCols, List<String> groupByCols) {
        // FIX: collect straight into a set instead of going through an intermediate list.
        Set<String> groupBySet = groupByCols.stream().map(String::toLowerCase).collect(Collectors.toSet());
        for (Object item : selectCols) {
            if (item instanceof String) {
                if (!groupBySet.contains(((String) item).toLowerCase())) {
                    throw new RuntimeException("Semantic Error: SELECT column '" + item + "' must appear in GROUP BY clause or be used in an aggregate function.");
                }
            }
        }
    }

    /** Builds the plan for EXPLAIN: plans the wrapped statement and wraps it. */
    private Executor buildExplainPlan(ExplainNode node, boolean isInternal) {
        Executor childPlan = buildPlan(node.getStatement(), isInternal);
        return new ExplainExecutor(childPlan);
    }

    /**
     * Verifies that the current user holds the given privilege on the table by
     * scanning the {@code _privileges_} system table. The admin user bypasses
     * the check; system tables (names starting with '_') are never directly
     * accessible.
     *
     * @throws SecurityException when the privilege is missing or a system table is targeted
     */
    private void checkPrivilege(String tableName, String privilege) {
        if (currentUser.equalsIgnoreCase("admin")) return;
        if (tableName.startsWith("_")) throw new SecurityException("Direct manipulation of system tables is not allowed.");
        Executor scan = new SeqScanExecutor(bufferPoolManager, catalog, "_privileges_");
        scan.open();
        boolean hasPermission = false;
        try {
            Tuple tuple;
            while ((tuple = scan.next()) != null) {
                if (tuple.getValue(0).toString().equalsIgnoreCase(currentUser) &&
                        tuple.getValue(1).toString().equalsIgnoreCase(tableName) &&
                        tuple.getValue(2).toString().equalsIgnoreCase(privilege)) {
                    hasPermission = true;
                    break;
                }
            }
        } finally {
            // FIX: close in a finally block so the scan is not leaked if next() throws.
            scan.close();
        }
        if (!hasPermission) throw new SecurityException("User '" + currentUser + "' does not have " + privilege.toUpperCase() + " privilege on table '" + tableName + "'");
    }

    /**
     * Wraps the input plan in a ProjectionExecutor that emits only the
     * requested columns / UDF results. '*' expands to all input columns.
     */
    private Executor applyProjection(Executor inputPlan, List<Object> projectionItems) {
        Schema inputSchema = inputPlan.getSchema();
        if (inputSchema == null) {
            throw new IllegalStateException("Cannot apply projection: Input plan has a null schema.");
        }
        List<Column> outputColumns = new ArrayList<>();
        for (Object item : projectionItems) {
            if (item instanceof String) {
                String colName = (String) item;
                if ("*".equals(colName)) {
                    outputColumns.addAll(inputSchema.getColumns());
                    break;
                }
                outputColumns.add(findColumn(inputSchema, colName));
            } else if (item instanceof FunctionCallNode) {
                FunctionCallNode funcNode = (FunctionCallNode) item;
                Type returnType = inferUdfReturnType(funcNode.getFunctionName());
                if (returnType == Type.VARCHAR) {
                    outputColumns.add(new Column(funcNode.toString(), returnType, 255));
                } else {
                    outputColumns.add(new Column(funcNode.toString(), returnType));
                }
            }
        }
        Schema outputSchema = new Schema(outputColumns);
        return new ProjectionExecutor(outputSchema, projectionItems, inputPlan, this.functionRegistry);
    }

    /**
     * Infers the return type of a built-in/registered function by name.
     * Unknown functions default to VARCHAR.
     */
    private Type inferUdfReturnType(String functionName) {
        String upperName = functionName.toUpperCase();
        switch (upperName) {
            case "STRLEN": case "LENGTH": case "COUNT": case "ADD":
                return Type.INTEGER;
            case "UPPER": case "LCASE": case "LOWER": case "REVERSE": case "CONCAT": case "SUBSTR": case "MD5":
                return Type.VARCHAR;
            default:
                return Type.VARCHAR;
        }
    }

    /**
     * Chooses the scan for a single-table SELECT: an index scan when the WHERE
     * clause is an equality on the primary key (assumed to be column 0),
     * otherwise a sequential scan.
     */
    private Executor buildSingleTableScan(SelectNode node) {
        Catalog.TableMetadata tableInfo = catalog.getTable(node.getTableName());
        ExpressionNode where = node.getWhereClause();
        if (where != null) {
            validateWhereClauseColumns(where, tableInfo.schema);
            if (where.getOperator().equals("=") && where.getLeftOperand().equalsIgnoreCase(tableInfo.schema.getColumn(0).getColumnName())) {
                Integer key = Integer.parseInt(where.getRightOperand().toString());
                return new IndexScanExecutor(bufferPoolManager, tableInfo, key);
            }
        }
        return new SeqScanExecutor(bufferPoolManager, catalog, node.getTableName());
    }

    /**
     * Builds a two-table equi-join plan: sequential scans on both sides, the
     * concatenated output schema, and either a hash join (default) or a
     * nested-loop join when forced via {@link #setForceNestedLoopJoin}.
     */
    private Executor buildJoinPlan(SelectNode node) {
        JoinNode joinNode = node.getJoinNode();
        String leftTableName = node.getTableName();
        String rightTableName = joinNode.getRightTable();
        checkPrivilege(leftTableName, "SELECT");
        checkPrivilege(rightTableName, "SELECT");
        Catalog.TableMetadata leftTableInfo = catalog.getTable(leftTableName);
        Catalog.TableMetadata rightTableInfo = catalog.getTable(rightTableName);
        if (leftTableInfo == null || rightTableInfo == null) {
            throw new RuntimeException("One or both tables for join not found.");
        }
        Executor leftScan = new SeqScanExecutor(bufferPoolManager, catalog, leftTableName);
        Executor rightScan = new SeqScanExecutor(bufferPoolManager, catalog, rightTableName);
        // Output schema is left columns followed by right columns.
        List<Column> joinColumns = new ArrayList<>(leftTableInfo.schema.getColumns());
        joinColumns.addAll(rightTableInfo.schema.getColumns());
        Schema joinOutputSchema = new Schema(joinColumns);
        ExpressionNode onCondition = joinNode.getOnCondition();
        int leftKeyIndex = findColumnIndex(leftTableInfo.schema, onCondition.getLeftOperand());
        int rightKeyIndex = findColumnIndex(rightTableInfo.schema, (String) onCondition.getRightOperand());
        if (leftKeyIndex == -1 || rightKeyIndex == -1) {
            throw new RuntimeException("Join key not found in one of the tables.");
        }
        Predicate joinPredicate = new Predicate(leftKeyIndex, rightKeyIndex);
        return forceNestedLoopJoin ? new NestedLoopJoinExecutor(leftScan, rightScan, joinPredicate, joinOutputSchema)
                : new HashJoinExecutor(leftScan, rightScan, joinPredicate, joinOutputSchema);
    }

    /**
     * Wraps the input plan in a FilterExecutor for the given WHERE clause,
     * coercing the literal to INTEGER when the column type requires it.
     */
    private Executor applyFilter(Executor inputPlan, ExpressionNode whereClause) {
        Schema inputSchema = inputPlan.getSchema();
        int colIndex = findColumnIndex(inputSchema, whereClause.getLeftOperand());
        // FIX: report a semantic error instead of failing later with an
        // IndexOutOfBoundsException when the column does not exist.
        if (colIndex == -1) {
            throw new RuntimeException("Semantic Error: Column '" + whereClause.getLeftOperand() + "' not found in table.");
        }
        Object value = whereClause.getRightOperand();
        if (inputSchema.getColumn(colIndex).getColumnType() == Type.INTEGER) {
            value = Integer.parseInt(value.toString());
        }
        Predicate predicate = new Predicate(colIndex, whereClause.getOperator(), value);
        return new FilterExecutor(predicate, inputPlan);
    }

    /** Validates that the WHERE clause references an existing column. */
    private void validateWhereClauseColumns(ExpressionNode where, Schema schema) {
        if (findColumnIndex(schema, where.getLeftOperand()) == -1) {
            throw new RuntimeException("Semantic Error: Column '" + where.getLeftOperand() + "' not found in table.");
        }
    }

    /** Returns the index of the named column (case-insensitive), or -1 if absent. */
    private int findColumnIndex(Schema schema, String columnName) {
        for (int i = 0; i < schema.getColumnCount(); i++) {
            if (schema.getColumn(i).getColumnName().equalsIgnoreCase(columnName)) {
                return i;
            }
        }
        return -1;
    }

    /** Returns the named column (case-insensitive) or throws a semantic error. */
    private Column findColumn(Schema schema, String columnName) {
        return schema.getColumns().stream()
                .filter(c -> c.getColumnName().equalsIgnoreCase(columnName))
                .findFirst()
                .orElseThrow(() -> new RuntimeException("Semantic Error: Column '" + columnName + "' not found."));
    }

    /**
     * Builds the plan for UPDATE. Only primary-key equality predicates are
     * supported. SET values are coerced to the column type; arithmetic
     * expressions (e.g. col = col + 1) are passed through for the executor to
     * evaluate per row.
     */
    private Executor buildUpdatePlan(UpdateNode node) {
        checkPrivilege(node.getTableName(), "UPDATE");
        Catalog.TableMetadata tableInfo = catalog.getTable(node.getTableName());
        if (tableInfo == null) throw new RuntimeException("Table not found: " + node.getTableName());
        if (node.getWhereClause() == null) throw new UnsupportedOperationException("UPDATE without WHERE clause is not supported.");
        ExpressionNode where = node.getWhereClause();
        // FIX: also reject non-equality operators; previously "pk > 5" was
        // silently executed as an equality update.
        if (!"=".equals(where.getOperator())
                || !where.getLeftOperand().equalsIgnoreCase(tableInfo.schema.getColumn(0).getColumnName())) {
            throw new UnsupportedOperationException("UPDATE only supports WHERE clause on the primary key.");
        }
        Integer keyToUpdate = Integer.parseInt(where.getRightOperand().toString().replace("'", ""));
        Map<String, Object> updates = new HashMap<>();
        for (ExpressionNode setExpr : node.getSetClause()) {
            String colNameToUpdate = setExpr.getLeftOperand();
            Object rawValue = setExpr.getRightOperand();
            Column col = tableInfo.schema.getColumns().stream().filter(c -> c.getColumnName().equalsIgnoreCase(colNameToUpdate)).findFirst()
                    .orElseThrow(() -> new RuntimeException("Semantic Error: Column '" + colNameToUpdate + "' not found in table '" + node.getTableName() + "'"));
            if (rawValue instanceof ArithmeticNode) {
                // Deferred evaluation: the executor computes the expression per row.
                updates.put(colNameToUpdate, rawValue);
            } else {
                Object typedValue = rawValue;
                if (col.getColumnType() == Type.INTEGER && rawValue instanceof String) {
                    typedValue = Integer.parseInt(((String) rawValue).replace("'", ""));
                } else if (col.getColumnType() == Type.VARCHAR && rawValue instanceof String) {
                    typedValue = ((String) rawValue).replace("'", "");
                }
                updates.put(colNameToUpdate, typedValue);
            }
        }
        Transaction currentTxn = transactionManager.getTransactionMap().values().stream().findFirst().orElse(null);
        return new UpdateExecutor(bufferPoolManager, tableInfo, keyToUpdate, updates, currentTxn, logManager);
    }

    /**
     * Builds the plan for BACKUP: flushes all dirty pages first so the copied
     * file is consistent.
     */
    private Executor buildBackupPlan(BackupNode node) {
        try {
            bufferPoolManager.flushAllPages();
        } catch (IOException e) {
            throw new RuntimeException("Failed to flush pages before backup: " + e.getMessage());
        }
        return new BackupExecutor(this.dbFileName, node.getFilePath());
    }

    /** Builds the plan for CREATE USER: inserts a (name, password-hash) row into _users_. */
    private Executor buildCreateUserPlan(CreateUserNode node) {
        Catalog.TableMetadata userInfo = catalog.getTable("_users_");
        String hashedPassword = Catalog.hashPassword(node.getPassword());
        List<Object> values = List.of(node.getUsername(), hashedPassword);
        Tuple userTuple = new Tuple(values, userInfo.schema);
        return new InsertExecutor(bufferPoolManager, userInfo, List.of(userTuple));
    }

    /** Builds the plan for DROP USER. */
    private Executor buildDropUserPlan(DropUserNode node) {
        return new DropUserExecutor(catalog, bufferPoolManager, node.getUsername());
    }

    /** Builds the plan for GRANT: inserts a (user, table, privilege) row into _privileges_. */
    private Executor buildGrantPlan(GrantNode node) {
        Catalog.TableMetadata privilegeInfo = catalog.getTable("_privileges_");
        List<Object> values = List.of(node.getUserName(), node.getTableName(), node.getPrivilege());
        Tuple privilegeTuple = new Tuple(values, privilegeInfo.schema);
        return new InsertExecutor(bufferPoolManager, privilegeInfo, List.of(privilegeTuple));
    }

    /** Builds the plan for REVOKE. */
    private Executor buildRevokePlan(RevokeNode node) {
        return new RevokeExecutor(catalog, bufferPoolManager, node.getPrivilege(), node.getTableName(), node.getUserName());
    }
}