package com.Lab427.workflow.service.DAGExecutor;

import com.Lab427.workflow.common.dtos.ResponseResult;
import com.Lab427.workflow.common.enums.ParamsType;
import com.Lab427.workflow.pojo.WorkNode;
import com.Lab427.workflow.utils.WorkFlowUtils;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.scheduling.concurrent.ThreadPoolTaskExecutor;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.stream.Collectors;

/**
 * Executes workflow DAGs on a shared thread pool.
 *
 * <p>Three strategies are offered:
 * <ul>
 *   <li>{@link #execute(Map)} — batch-parallel: topological layers run one layer at a time;</li>
 *   <li>{@link #execute(Map, boolean)} — fully dynamic: a node is submitted as soon as its
 *       in-degree drops to zero;</li>
 *   <li>{@link #executeDAGWithRetry(Map, boolean, int, long)} — dynamic with per-node timeout
 *       and retry, returning the collected results asynchronously.</li>
 * </ul>
 *
 * @author Yue
 * @date:2025/8/28 20:58
 */
@Service
public class DAGExecutorService {

    @Autowired
    private NodeServiceDispatcher nodeServiceDispatcher;

    /** Shared pool on which every node's {@code call()} runs. */
    @Autowired
    @Qualifier("globalExecutor")
    private ThreadPoolTaskExecutor globalExecutor;

    /**
     * Batch-parallel execution: converts the raw graph into {@link WorkNode}s, topologically
     * sorts them into layers, and runs each layer in parallel, waiting for a layer to finish
     * before starting the next.
     *
     * @param graph raw DAG description (structure is interpreted by
     *              {@link WorkFlowUtils#convertGraphToWorkNodes}; schema not visible here)
     * @return {@code ResponseResult.okResult("ok")} once every layer has completed
     * @throws java.util.concurrent.CompletionException if any node fails; the node id and
     *         original cause are preserved in the exception chain
     */
    public ResponseResult execute(Map<String, Object> graph) {
        List<WorkNode> nodes = WorkFlowUtils.convertGraphToWorkNodes(nodeServiceDispatcher, graph);
        List<List<WorkNode>> batches = WorkFlowUtils.batchTopologicalSort(nodes);

        // Every node's output keyed by node id; shared across batches so later layers
        // can see earlier results. NOTE(review): assumes node.call() never returns null —
        // ConcurrentHashMap.put would NPE otherwise; confirm against the node services.
        Map<Long, String> results = new ConcurrentHashMap<>();
        for (List<WorkNode> batch : batches) {

            // Inject the accumulated results of previous batches into each node's config.
            WorkFlowUtils.injectPreviousResults(batch, results);

            // Run the whole batch in parallel on the shared executor.
            List<CompletableFuture<String>> futures = batch.stream()
                    .map(node -> CompletableFuture.supplyAsync(() -> {
                                try {
                                    return node.call();
                                } catch (Exception e) {
                                    // Keep the node id and the real cause so join() below
                                    // surfaces a diagnosable failure instead of a bare wrapper.
                                    throw new CompletionException(
                                            "节点 " + node.getId() + " 执行失败或超时", e);
                                }
                            }, globalExecutor)
                            .thenApply(result -> {
                                results.put(node.getId(), result);
                                return result;
                            }))
                    .collect(Collectors.toList());

            // Wait for the current layer to drain before scheduling the next one.
            CompletableFuture.allOf(futures.toArray(new CompletableFuture[0])).join();
            System.out.println("一批次处理完");
        }
        return ResponseResult.okResult("ok");
    }

    /**
     * Dynamic (Kahn-style) execution: every node whose in-degree reaches zero is submitted
     * immediately, so independent branches overlap freely instead of waiting for a layer.
     *
     * <p>Blocks the calling thread until all nodes have finished.
     *
     * @param dagGraph  raw DAG description
     * @param isDynamic unused flag distinguishing this overload from {@link #execute(Map)};
     *                  presumably reserved for future behavior — TODO confirm
     * @return {@code ResponseResult.okResult("ok")} once every node has completed
     * @throws InterruptedException  if the calling thread is interrupted while waiting
     * @throws IllegalStateException if the graph contains a cycle (the original code would
     *                               spin forever in that case)
     */
    public ResponseResult execute(Map<String, Object> dagGraph, boolean isDynamic) throws InterruptedException {
        List<WorkNode> nodes = WorkFlowUtils.convertGraphToWorkNodes(nodeServiceDispatcher, dagGraph);
        int totalNodes = nodes.size();

        // Thread-safe in-degree table and adjacency list (dependency id -> dependents).
        Map<Long, AtomicInteger> inDegree = new ConcurrentHashMap<>();
        Map<Long, List<WorkNode>> graph = new ConcurrentHashMap<>();
        for (WorkNode node : nodes) {
            inDegree.put(node.getId(), new AtomicInteger(node.getDependencies().size()));
            for (Long dep : node.getDependencies()) {
                graph.computeIfAbsent(dep, k -> new ArrayList<>()).add(node);
            }
        }

        // Seed the ready queue with all roots (in-degree zero).
        BlockingQueue<WorkNode> readyQueue = new LinkedBlockingQueue<>();
        for (WorkNode node : nodes) {
            if (inDegree.get(node.getId()).get() == 0) {
                readyQueue.put(node);
            }
        }

        AtomicInteger remaining = new AtomicInteger(totalNodes);
        // Tracks submitted-but-unfinished tasks so a cycle (queue empty, nothing running,
        // nodes still remaining) is detected instead of looping forever.
        AtomicInteger inFlight = new AtomicInteger(0);

        while (remaining.get() > 0) {
            WorkNode node = readyQueue.poll(100, TimeUnit.MILLISECONDS); // bounded wait
            if (node == null) {
                // Nothing ready. If nothing is running either, no finally-block can ever
                // enqueue another node (successors are enqueued BEFORE inFlight is
                // decremented), so the remaining nodes form a cycle.
                if (inFlight.get() == 0 && readyQueue.isEmpty() && remaining.get() > 0) {
                    throw new IllegalStateException(
                            "DAG contains a cycle or unsatisfiable dependencies; "
                                    + remaining.get() + " node(s) can never become ready");
                }
                continue;
            }
            inFlight.incrementAndGet();
            globalExecutor.submit(() -> {
                try {
                    // Run the node and push its output into every dependent's config.
                    String result = node.call();

                    List<WorkNode> nextNodes = graph.getOrDefault(node.getId(), Collections.emptyList());
                    for (WorkNode next : nextNodes) {
                        synchronized (next) { // serialize config writes per dependent
                            if (next.getConfig() == null) {
                                next.setConfig(new ConcurrentHashMap<>());
                            }
                            Map<String, Object> outputDataMap = (Map<String, Object>) next.getConfig()
                                    .getOrDefault(ParamsType.OUTPUT_LASTBATCH.getValue(), new HashMap<String, Object>());
                            // Key the output by the predecessor's id.
                            outputDataMap.put("outputData" + node.getId().toString(), result);
                            next.getConfig().put(ParamsType.OUTPUT_LASTBATCH.getValue(), outputDataMap);
                        }
                    }

                } catch (Exception e) {
                    // Best-effort: a failed node is logged and its successors still run
                    // (without its output). TODO: route through a real logger / global handler.
                    e.printStackTrace();
                } finally {
                    // Release successors whose last dependency just finished.
                    List<WorkNode> nextNodes = graph.getOrDefault(node.getId(), Collections.emptyList());
                    for (WorkNode next : nextNodes) {
                        if (inDegree.get(next.getId()).decrementAndGet() == 0) {
                            readyQueue.add(next);
                        }
                    }
                    remaining.decrementAndGet();
                    // Must be decremented AFTER successors are enqueued — the cycle check
                    // above relies on this ordering.
                    inFlight.decrementAndGet();
                    System.out.println(node.getId() + "处理完毕");
                    System.out.println(remaining);
                }
            });
        }
        System.out.println("所有节点处理完毕");

        return ResponseResult.okResult("ok");
    }

    /**
     * Dynamic execution with a per-node timeout and retry budget.
     *
     * <p>Unlike the original implementation, a permanently failed node now cascades its
     * failure to every (transitive) successor's future, so the returned
     * {@code CompletableFuture} always completes — previously it hung forever on any
     * unrecoverable node failure.
     *
     * @param dagGraph       raw DAG description
     * @param isDynamic      unused; kept for signature compatibility — TODO confirm intent
     * @param maxRetry       retry budget granted to EACH node (not shared across nodes)
     * @param timeoutSeconds per-attempt timeout for a single node execution
     * @return a future yielding node-id → result for all successfully executed nodes;
     *         completes exceptionally if any node exhausts its retries or times out
     */
    public CompletableFuture<Map<Long, String>> executeDAGWithRetry(
            Map<String, Object> dagGraph,
            boolean isDynamic,
            int maxRetry,
            long timeoutSeconds) {

        List<WorkNode> nodes = WorkFlowUtils.convertGraphToWorkNodes(nodeServiceDispatcher, dagGraph);
        Map<Long, AtomicInteger> inDegree = new ConcurrentHashMap<>();
        Map<Long, List<WorkNode>> graph = new ConcurrentHashMap<>();
        Map<Long, String> results = new ConcurrentHashMap<>();
        Map<Long, CompletableFuture<String>> nodeFutures = new ConcurrentHashMap<>();

        // Build in-degree table and adjacency list, and create one completion future per node.
        for (WorkNode node : nodes) {
            inDegree.put(node.getId(), new AtomicInteger(node.getDependencies().size()));
            for (Long dep : node.getDependencies()) {
                graph.computeIfAbsent(dep, k -> new ArrayList<>()).add(node);
            }
            nodeFutures.put(node.getId(), new CompletableFuture<>());
        }

        // Kick off every root with a full retry budget.
        for (WorkNode node : nodes) {
            if (inDegree.get(node.getId()).get() == 0) {
                executeNode(node, graph, inDegree, results, nodeFutures, maxRetry, maxRetry, timeoutSeconds);
            }
        }

        // Completes (normally or exceptionally) once every per-node future is settled.
        return CompletableFuture.allOf(nodeFutures.values().toArray(new CompletableFuture[0]))
                .thenApply(v -> results);
    }

    /**
     * Runs a single node asynchronously with timeout and retry, then triggers its successors.
     *
     * @param retriesLeft retries remaining for THIS node's current attempt chain
     * @param maxRetry    full budget handed to each newly released successor (the original
     *                    code incorrectly passed the partially-spent {@code retriesLeft})
     */
    private void executeNode(
            WorkNode node,
            Map<Long, List<WorkNode>> graph,
            Map<Long, AtomicInteger> inDegree,
            Map<Long, String> results,
            Map<Long, CompletableFuture<String>> nodeFutures,
            int retriesLeft,
            int maxRetry,
            long timeoutSeconds) {

        CompletableFuture.supplyAsync(() -> {
                    try {
                        return node.call();
                    } catch (Exception e) {
                        throw new RuntimeException(e);
                    }
                }, globalExecutor)
                .orTimeout(timeoutSeconds, TimeUnit.SECONDS)
                .whenComplete((result, ex) -> {
                    if (ex != null) {
                        if (retriesLeft > 0) {
                            System.out.println("节点 " + node.getId() + " 执行失败，重试剩余次数：" + retriesLeft);
                            // Non-blocking 200 ms back-off: never Thread.sleep() on a pool or
                            // timeout-scheduler thread (the original did, stalling the pool).
                            CompletableFuture.delayedExecutor(200, TimeUnit.MILLISECONDS, globalExecutor)
                                    .execute(() -> executeNode(node, graph, inDegree, results,
                                            nodeFutures, retriesLeft - 1, maxRetry, timeoutSeconds));
                        } else {
                            RuntimeException failure = new RuntimeException(
                                    "节点 " + node.getId() + " 执行失败或超时", ex);
                            nodeFutures.get(node.getId()).completeExceptionally(failure);
                            // Fail every transitive successor's future too; otherwise the
                            // allOf(...) in executeDAGWithRetry would never complete.
                            failSuccessors(node, graph, nodeFutures, failure);
                        }
                    } else {
                        // Success: record the result and settle this node's future.
                        results.put(node.getId(), result);
                        nodeFutures.get(node.getId()).complete(result);

                        // Push the output into each successor's config, then release any
                        // successor whose last dependency just completed.
                        List<WorkNode> nextNodes = graph.getOrDefault(node.getId(), Collections.emptyList());
                        for (WorkNode next : nextNodes) {
                            synchronized (next) { // serialize config writes per dependent
                                if (next.getConfig() == null) next.setConfig(new ConcurrentHashMap<>());
                                Map<String, Object> outputDataMap =
                                        (Map<String, Object>) next.getConfig()
                                                .getOrDefault(ParamsType.OUTPUT_LASTBATCH.getValue(), new HashMap<>());
                                outputDataMap.put("outputData" + node.getId(), result);
                                next.getConfig().put(ParamsType.OUTPUT_LASTBATCH.getValue(), outputDataMap);
                            }

                            if (inDegree.get(next.getId()).decrementAndGet() == 0) {
                                // Successors start with a FULL retry budget of their own.
                                executeNode(next, graph, inDegree, results, nodeFutures,
                                        maxRetry, maxRetry, timeoutSeconds);
                            }
                        }
                    }
                });
    }

    /**
     * Completes the futures of all transitive successors of {@code node} exceptionally.
     * Uses an explicit stack (no recursion) and relies on {@code completeExceptionally}
     * returning {@code false} for already-settled futures to avoid revisiting subgraphs.
     */
    private void failSuccessors(
            WorkNode node,
            Map<Long, List<WorkNode>> graph,
            Map<Long, CompletableFuture<String>> nodeFutures,
            Throwable cause) {

        Deque<WorkNode> pending = new ArrayDeque<>(
                graph.getOrDefault(node.getId(), Collections.emptyList()));
        while (!pending.isEmpty()) {
            WorkNode next = pending.pop();
            // Only descend the first time a future is settled; a node that already
            // completed (normally or via another failed ancestor) stops the walk.
            if (nodeFutures.get(next.getId()).completeExceptionally(cause)) {
                pending.addAll(graph.getOrDefault(next.getId(), Collections.emptyList()));
            }
        }
    }


}
