package com.yuaer.demo.springsparkjobhandlerdemo.executor;

import com.yuaer.demo.springsparkjobhandlerdemo.executor.processor.NodeProcessor;
import com.yuaer.demo.springsparkjobhandlerdemo.executor.processor.NodeProcessorFactory;
import com.yuaer.demo.springsparkjobhandlerdemo.model.Node;
import org.apache.spark.sql.*;

import java.util.*;
import java.util.concurrent.*;
import java.util.stream.Collectors;

/**
 * DAG execution scheduler: runs node tasks in topological order, executing
 * independent nodes in parallel on a fixed-size thread pool.
 *
 * <p>Intended for single-shot use: {@link #execute()} schedules the entire
 * DAG, blocks until every node finishes, and shuts the worker pool down.
 * (The instance cannot be reused — the {@code scheduled} set is never cleared.)
 */
public class DagExecutor {
    // Shared execution context handed to every node processor.
    private final GlobalContext globalContext;
    // Node id -> node definition.
    private final Map<String, Node> nodeMap;
    // Reverse dependency edges: parent id -> ids of nodes that depend on it.
    private final Map<String, List<String>> reverseDeps = new HashMap<>();
    // Node id -> future of that node's output dataset.
    // NOTE(review): kept public for backward compatibility with existing callers;
    // prefer making this private and exposing a read-only accessor.
    public final Map<String, CompletableFuture<Dataset<Row>>> results = new ConcurrentHashMap<>();

    // Ids already submitted, to avoid double-scheduling a node.
    private final Set<String> scheduled = ConcurrentHashMap.newKeySet();

    // Worker pool for node execution. TODO: make the pool size configurable.
    private final ExecutorService executor = Executors.newFixedThreadPool(8);


    /**
     * Builds the executor for the given nodes and precomputes reverse edges.
     *
     * @param globalContext shared context passed to every processor
     * @param nodes         DAG nodes; ids must be unique
     * @throws IllegalStateException if two nodes share an id (from {@code toMap})
     */
    public DagExecutor(GlobalContext globalContext, List<Node> nodes) {
        this.globalContext = globalContext;
        // Index nodes by id for O(1) lookup during scheduling.
        this.nodeMap = nodes.stream().collect(Collectors.toMap(Node::getId, n -> n));
        // Build the reverse dependency map (parent -> children).
        for (Node node : nodes) {
            for (String dep : node.getDependsOn()) {
                reverseDeps.computeIfAbsent(dep, k -> new ArrayList<>()).add(node.getId());
            }
        }
    }

    /**
     * Executes the whole DAG and blocks until every node has finished.
     *
     * @return node id -> output dataset for every node in the DAG
     * @throws IllegalStateException if the graph contains a cycle or unreachable
     *                               nodes (they could never be scheduled)
     * @throws CompletionException   if any node processor throws
     */
    public Map<String, Dataset<Row>> execute() {
        try {
            // Roots: nodes with no dependencies. In the current model there is
            // normally exactly one root node.
            Set<String> ready = nodeMap.values().stream()
                    .filter(n -> n.getDependsOn().isEmpty())
                    .map(Node::getId)
                    .collect(Collectors.toSet());

            // Fix: mark roots in `scheduled` too, consistent with child scheduling.
            for (String nodeId : ready) {
                if (scheduled.add(nodeId)) {
                    executeNodeAsync(nodeId);
                }
            }

            // Fix: a cycle (or a node unreachable from any root) previously caused
            // a silently incomplete result map — fail fast with the missing ids.
            if (results.size() != nodeMap.size()) {
                Set<String> missing = new HashSet<>(nodeMap.keySet());
                missing.removeAll(results.keySet());
                throw new IllegalStateException(
                        "DAG contains a cycle or unreachable nodes: " + missing);
            }

            // Wait for every node to complete (propagates the first failure).
            CompletableFuture.allOf(results.values().toArray(new CompletableFuture[0])).join();

            // Materialize the results; join() is non-blocking here since all
            // futures are already complete.
            return results.entrySet().stream()
                    .collect(Collectors.toMap(Map.Entry::getKey, e -> e.getValue().join()));
        } finally {
            // Fix: the pool was never shut down, leaking non-daemon worker
            // threads and preventing JVM exit after execution finished.
            executor.shutdown();
        }
    }

    /**
     * Registers an async task for {@code nodeId} that runs once all of its
     * parents complete, then — synchronously, on the caller's thread — schedules
     * any children whose dependencies are now all registered. Because all
     * scheduling happens on the single calling thread, no extra synchronization
     * is needed around the child check.
     */
    private void executeNodeAsync(String nodeId) {
        Node node = nodeMap.get(nodeId);
        List<String> parentIds = node.getDependsOn();

        // Futures of all parents; guaranteed present because a node is only
        // scheduled after every parent has been registered in `results`.
        List<CompletableFuture<Dataset<Row>>> parentFutures = parentIds.stream()
                .map(results::get)
                .collect(Collectors.toList());

        // Run the node's processor on the pool once every parent has completed.
        CompletableFuture<Dataset<Row>> current = CompletableFuture
                .allOf(parentFutures.toArray(new CompletableFuture[0]))
                .thenApplyAsync(v -> {
                    Map<String, Dataset<Row>> inputMap = new HashMap<>();
                    for (int i = 0; i < parentIds.size(); i++) {
                        // TODO: if the node id is not sufficient as the upstream data key,
                        // consider adding a dedicated attribute on Node or using the type
                        // field; for now the parent id serves as the downstream key.
                        inputMap.put(parentIds.get(i), parentFutures.get(i).join());
                    }
                    NodeContext nodeCtx = new NodeContext(nodeId, inputMap, node.getParams());
                    NodeProcessor processor = NodeProcessorFactory.getProcessor(node.getType());
                    Dataset<Row> output = processor.process(globalContext, nodeCtx);
//                    output.persist(StorageLevel.MEMORY_AND_DISK()); // optional: cache when a node has multiple downstream consumers to avoid recomputation; requires an unpersist hook to release resources
                    return output;
                }, executor);

        // Register the future before probing children so the containsAll check
        // below (and in sibling invocations) sees this node as available.
        results.put(nodeId, current);

        // Schedule each child whose parents are now all registered; the
        // `scheduled` set guards against double-submitting shared children.
        List<String> children = reverseDeps.getOrDefault(nodeId, Collections.emptyList());
        for (String childId : children) {
            Node child = nodeMap.get(childId);
            if (results.keySet().containsAll(child.getDependsOn())) {
                if (scheduled.add(childId)) {
                    executeNodeAsync(childId);
                }
            }
        }
    }
}



