package com.business.etl.flow.service;

import cn.hutool.core.date.DateUtil;
import cn.hutool.core.date.TimeInterval;
import com.business.etl.flow.log.MQLogger;
import com.business.etl.flow.mq.LogProducer;
import com.business.etl.flow.process.*;
import com.business.etl.flow.status.StatusManager;
import com.google.common.graph.Graph;
import com.component.api.constants.FlowStatus;
import com.component.api.constants.NodeStatus;
import com.component.api.model.AbstractData;
import com.component.api.model.PortData;
import com.component.api.model.ProcessResult;
import com.component.api.model.data.SetData;
import com.component.api.model.data.StaticRowData;
import com.component.api.model.flow.FlowInfo;
import com.component.api.model.flow.NodeInfo;
import com.component.api.model.flow.RelationInfo;
import com.component.api.model.param.TableColumn;
import com.component.api.utils.FlowUtils;
import com.etl.component.common.AbstractFlinkComponent;
import com.common.log.Logger;
import com.common.model.exception.FlowAnalysisException;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.exception.ExceptionUtils;
import org.apache.flink.api.java.DataSet;
import org.apache.flink.api.java.ExecutionEnvironment;
import org.apache.flink.api.java.Utils;
import org.apache.flink.api.java.operators.MapOperator;
import org.apache.flink.types.Row;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;

import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;
import java.util.concurrent.*;
import java.util.stream.Collectors;

/**
 * Flow execution service.
 *
 * <p>Runs ETL flows with at most {@link #RUNING_FLOW_NUM} flows executing concurrently.
 * A flow occupies one slot in {@link #FLOW_QUEUE} while running; flows that cannot get
 * a slot are parked in {@link #WAIT_QUEUE} and promoted when a slot is released.
 *
 * @author xianggj
 * @Date 2021/10/14 17:10
 **/
@Slf4j
@Service
public class FlowService {

    /**
     * Maximum number of flows allowed to run concurrently.
     */
    public static final int RUNING_FLOW_NUM = 5;

    /**
     * Flows waiting for a free running slot.
     */
    public static final ConcurrentLinkedQueue<FlowInfo> WAIT_QUEUE = new ConcurrentLinkedQueue<>();

    /**
     * Ids of the flows currently running; capacity bounds the concurrency.
     */
    public static final ArrayBlockingQueue<String> FLOW_QUEUE = new ArrayBlockingQueue<>(RUNING_FLOW_NUM);

    /**
     * Pool that executes whole flows (one task per running flow).
     */
    public ExecutorService executorService = Executors.newFixedThreadPool(6);

    /**
     * Shared pool for node execution. Same configuration as the executor the original
     * code created inside every runNodes() call (0 core threads, 60s keep-alive,
     * SynchronousQueue, i.e. a cached thread pool), but created once — the per-call
     * executors were never shut down and leaked on every recursive invocation.
     */
    private final ExecutorService nodeExecutor = new ThreadPoolExecutor(0, Integer.MAX_VALUE,
            60L, TimeUnit.SECONDS,
            new SynchronousQueue<Runnable>());

    @Autowired
    private LogProducer logProducer;

    @Autowired
    private StatusManager statusManager;

    @Autowired
    private ResultCacheByRedis resultCacheByRedis;

    /**
     * Submit a flow for execution.
     *
     * <p>If a running slot is free the flow starts immediately on {@link #executorService};
     * otherwise it is parked in {@link #WAIT_QUEUE} and its status set to
     * {@link FlowStatus#WAIT} until {@link #removeKey(String)} promotes it.
     *
     * @param flowInfo the flow to run
     */
    public void readyRun(FlowInfo flowInfo) {
        // When RUNING_FLOW_NUM flows are already running the offer fails and the
        // flow goes to the wait queue instead.
        if (FLOW_QUEUE.offer(flowInfo.getFlowId())) {
            log.info("当前运行中流程队列" + FLOW_QUEUE.toString());
            log.info("任务" + flowInfo.getFlowId() + "开始执行...");
            executorService.execute(() -> exec(flowInfo));
        } else {
            log.info("当前运行中流程队列" + FLOW_QUEUE.toString());
            log.info("任务" + flowInfo.getFlowId() + "进入等待...");
            WAIT_QUEUE.offer(flowInfo);
            statusManager.setFlowStatus(flowInfo.getFlowId(), FlowStatus.WAIT);
        }
    }

    /**
     * Execute a flow end to end: build the node graph, run all nodes, then perform
     * completion bookkeeping (status, result caching, slot release).
     *
     * @param flowInfo the flow to execute; must already hold a FLOW_QUEUE slot
     * @throws FlowAnalysisException if the flow's node/edge data cannot be parsed
     */
    public void exec(FlowInfo flowInfo) {
        statusManager.setFlowStatus(flowInfo.getFlowId(), FlowStatus.START);
        // Per-flow logger that ships log lines over MQ.
        MQLogger logger = new MQLogger(logProducer, flowInfo.getFlowId());
        logger.setFlowName(flowInfo.getFlowName());
        FlowContext flowContext = new FlowContext(flowInfo);
        // One Flink environment per flow context so concurrent flows do not share data.
        AbstractFlinkComponent.putEnv(flowContext.hashCodeStr(), ExecutionEnvironment.getExecutionEnvironment());
        flowContext.setStatusManager(statusManager);
        logger.info("流程提交成功.");
        TimeInterval timer = DateUtil.timer();
        List<NodeInfo> nodes = flowInfo.getNodes();
        if (nodes.isEmpty()) {
            logger.error("运行节点为空");
            // FIX: the original returned here without releasing the running slot,
            // the logger or the Flink environment — permanently leaking one of the
            // RUNING_FLOW_NUM slots and leaving the flow stuck in START.
            releaseAfterAbort(flowContext, logger);
            return;
        }

        List<RelationInfo> edges = flowInfo.getEdges();
        // Nodes with no predecessors — the graph's entry points.
        Set<NodeInfo> starts = new HashSet<>();
        Graph<NodeInfo> graph;
        try {
            graph = FlowUtils.analysisGraph(nodes, edges, starts);
        } catch (Exception e) {
            log.error("流程数据解析异常", e);
            logger.error("运行数据解析异常：" + e.getMessage());
            // FIX: release the slot/logger/environment before propagating,
            // otherwise the slot leaks exactly as in the empty-nodes case.
            releaseAfterAbort(flowContext, logger);
            throw new FlowAnalysisException("流程解析异常，流程数据不合法");
        }
        flowContext.setGraph(graph);
        logger.info("流程初始化完成.");
        // Tracks running node threads and intermediate results for this flow.
        ProcessHandler processHandler = new ProcessHandler(flowInfo.getFlowId());
        flowContext.setProcessHandler(processHandler);
        // One count per node; exec() only proceeds to completeDone() once every
        // node has finished (or was counted down by stopAfterNode()).
        CountDownLatch cdl = new CountDownLatch(nodes.size());
        flowContext.setCdl(cdl);

        statusManager.setFlowStatus(flowInfo.getFlowId(), FlowStatus.RUNNING);
        flowContext.setFlowStatus(FlowStatus.RUNNING);
        logger.info("流程开始执行...");
        try {
            runNodes(starts, flowContext);
        } catch (Throwable e) {
            log.error("流程执行失败", e);
            logger.error("流程执行失败" + ExceptionUtils.getStackTrace(e));
        }

        try {
            // Wait for every node (or its stop-propagation) to count down.
            cdl.await();
        } catch (InterruptedException e) {
            // FIX: restore the interrupt flag instead of swallowing it.
            Thread.currentThread().interrupt();
            log.error("等待流程执行结束时被中断", e);
        }
        completeDone(flowContext, logger, timer);
    }

    /**
     * Release everything an aborted flow holds: MQ logger, Flink environment and the
     * running slot. Also marks the flow as failed so it does not stay in START forever.
     *
     * @param flowContext context of the aborted flow
     * @param logger      the flow's MQ logger, released here
     */
    private void releaseAfterAbort(FlowContext flowContext, MQLogger logger) {
        String flowId = flowContext.getFlowInfo().getFlowId();
        logger.release();
        statusManager.setFlowStatus(flowId, FlowStatus.FAIL);
        AbstractFlinkComponent.clearEnv(flowContext.hashCodeStr());
        removeKey(flowId);
    }

    /**
     * Completion bookkeeping after all nodes have finished: final status, optional
     * intermediate-result caching, environment cleanup and slot release.
     *
     * @param flowContext flow context of the finished flow
     * @param logger      flow logger (released here)
     * @param timer       timer started when the flow was submitted
     */
    private void completeDone(FlowContext flowContext, Logger logger, TimeInterval timer) {
        FlowInfo flowInfo = flowContext.getFlowInfo();
        logger.info("运行结束.");
        logger.info("运行总耗时 " + timer.interval() + "ms");
        logger.release();
        // Only promote to FINISH when no node failed or stopped the flow.
        if (!(FlowStatus.FAIL.equals(flowContext.getFlowStatus())
                || FlowStatus.STOP.equals(flowContext.getFlowStatus()))) {
            statusManager.setFlowStatus(flowInfo.getFlowId(), FlowStatus.FINISH);
        }
        // FIX: guard against a null Boolean to avoid an NPE on unboxing.
        if (Boolean.TRUE.equals(flowInfo.getIsNeedViewResult())) {
            for (NodeInfo nodeInfo : flowInfo.getNodes()) {
                String nodeId = nodeInfo.getId();
                // NOTE(review): a node's result may not be fully flushed yet at
                // this point — preserved original best-effort behavior.
                resultCacheByRedis.addStrs(nodeId, ResultCache.get(nodeId));
            }
        }
        AbstractFlinkComponent.clearEnv(flowContext.hashCodeStr());
        // Free the running slot and promote a waiting flow, if any.
        removeKey(flowInfo.getFlowId());
    }

    /**
     * Release one running slot and promote the next waiting flow, if any.
     *
     * <p>Note: because the queue only tracks slot occupancy, which flows are running
     * can only be determined from their status, not from this queue.
     *
     * @param flowId id of the flow whose slot is released
     */
    public void removeKey(String flowId) {
        FLOW_QUEUE.remove(flowId);
        FlowInfo poll = WAIT_QUEUE.poll();
        if (poll != null) {
            log.info("任务" + poll.getFlowId() + "等待结束...");
            readyRun(poll);
        }
    }

    /**
     * Run a set of ready nodes asynchronously, then recursively schedule each node's
     * successors as it completes. Blocks until this level's nodes have produced their
     * results so intermediate data can be cached.
     *
     * @param startIds    nodes ready to run at this level
     * @param flowContext flow context (graph, latch, handler, status)
     */
    private void runNodes(Set<NodeInfo> startIds, FlowContext flowContext) {
        List<CompletableFuture<ProcessResult>> futures = new ArrayList<>();
        ProcessHandler processHandler = flowContext.getProcessHandler();
        CountDownLatch cdl = flowContext.getCdl();
        Graph<NodeInfo> graph = flowContext.getGraph();
        for (NodeInfo startNode : startIds) {
            final String nodeId = startNode.getId();
            // A node that is already running must not be started again.
            if (processHandler.isRunning(nodeId)) {
                continue;
            } else {
                // Register immediately (with the current thread as a placeholder,
                // replaced once the node's own thread starts) so a concurrent
                // runNodes() call sees the node as running during thread creation.
                processHandler.putThread(nodeId, Thread.currentThread());
            }
            statusManager.setNodeStatus(processHandler.getFlowId(), nodeId, NodeStatus.INIT);
            // Per-node MQ logger.
            MQLogger logger = new MQLogger(logProducer, processHandler.getFlowId());
            logger.setNodeId(nodeId);
            logger.setNodeName(startNode.getName());
            logger.info("初始化...");
            CompletableFuture<ProcessResult> future =
                    CompletableFuture.supplyAsync(new ExecNode(startNode, flowContext, logger),
                            nodeExecutor);
            futures.add(future);
            // Recurse asynchronously so this level does not block successor scheduling.
            future.whenCompleteAsync((res, throwable) -> {
                if (throwable != null) {
                    // Node failed: log, mark node and flow as failed, and cancel
                    // everything downstream of the failed node.
                    log.error("节点执行失败", throwable);
                    logger.error(throwable.getMessage());
                    statusManager.setNodeStatus(flowContext.getFlowInfo().getFlowId(),
                            startNode.getId(), NodeStatus.FAIL);
                    statusManager.setFlowStatus(flowContext.getFlowInfo().getFlowId(),
                            FlowStatus.FAIL);
                    flowContext.setFlowStatus(FlowStatus.FAIL);
                    // Tracks successors already stopped, so merge nodes reached via
                    // several paths are only processed once.
                    Set<String> isStopNode = new HashSet<>();
                    stopAfterNode(flowContext, startNode, isStopNode, logger);
                } else {
                    Set<NodeInfo> successors = graph.successors(startNode);
                    if (!successors.isEmpty()) {
                        // Only keep the result when someone downstream consumes it.
                        processHandler.putValue(nodeId, res);
                        runNodes(successors, flowContext);
                    }
                }
                cdl.countDown();
            });
        }
        // Handle this level's intermediate data once each node has a result.
        for (CompletableFuture<ProcessResult> f : futures) {
            try {
                ProcessResult processResult = f.get();
                if (processResult == null) {
                    continue;
                }
                // A null output marks a node that was stopped mid-flow.
                if (processResult.getOutputDatas() == null) {
                    statusManager.setNodeStatus(processHandler.getFlowId(),
                            processResult.getNodeId(), NodeStatus.STOP);
                    continue;
                }
                // FIX: null-safe Boolean check (was an unboxing NPE risk).
                if (Boolean.TRUE.equals(flowContext.getFlowInfo().getIsNeedViewResult())) {
                    // Expensive, but required when intermediate results are viewed.
                    cacheResult(processResult);
                }
                statusManager.setNodeStatus(processHandler.getFlowId(),
                        processResult.getNodeId(), NodeStatus.FINISH);
            } catch (InterruptedException e) {
                // FIX: restore the interrupt flag instead of swallowing it.
                Thread.currentThread().interrupt();
                log.error("等待节点结果时被中断", e);
            } catch (ExecutionException e) {
                // The failure itself is handled in whenCompleteAsync above.
                log.error("节点执行异常", e);
            }
        }
    }

    /**
     * Cache a node's result for later viewing: column headers go to Redis directly;
     * row data is either hooked into the Flink dataset (collected as it is actually
     * computed) or, for static rows, written straight to Redis.
     *
     * @param processResult the finished node's result
     */
    private void cacheResult(ProcessResult processResult) {
        List<PortData> portDatas = processResult.getPortDatas();
        final String key = processResult.getNodeId();
        for (PortData portData : portDatas) {
            AbstractData abstractData = portData.getValue();
            if (abstractData != null) {
                if (abstractData instanceof SetData) {
                    SetData value = (SetData) abstractData;
                    List<TableColumn> columns = value.getColumns();
                    List<String> headers = columns.stream()
                            .map(TableColumn::getColumnName)
                            .collect(Collectors.toList());
                    resultCacheByRedis.setHeader(key, headers);
                    DataSet<Row> dataset = (DataSet<Row>) value.getDataset();
                    // Wrap the dataset so each row is captured when Flink actually
                    // executes — rows only exist once the job has run.
                    dataset = new MapOperator<>(dataset, dataset.getType(), e -> {
                        ResultCache.set(key, AbstractFlinkComponent.toQuotedListString(e));
                        return e;
                    }, Utils.getCallLocationName());
                    value.setDataset(dataset);
                } else if (abstractData instanceof StaticRowData) {
                    StaticRowData value = (StaticRowData) abstractData;
                    List<TableColumn> columns = value.getColumns();
                    List<String> headers = columns.stream()
                            .map(TableColumn::getColumnName)
                            .collect(Collectors.toList());
                    resultCacheByRedis.setHeader(key, headers);
                    // Static rows are already materialized; store them directly.
                    resultCacheByRedis.addRows(key, value.getDataset());
                }
            }
        }
    }

    /**
     * Recursively stop all successors of a failed node: interrupt any that are
     * running, mark them STOP, and count down the latch for each so exec() can
     * still finish.
     *
     * @param flowContext flow context
     * @param nodeInfo    the failed (or transitively stopped) source node
     * @param isStopNode  ids of successors already stopped — prevents re-processing
     *                    merge nodes reachable through multiple paths
     * @param logger      node logger for the warning messages
     */
    private void stopAfterNode(FlowContext flowContext, NodeInfo nodeInfo,
                               Set<String> isStopNode, Logger logger) {
        Graph<NodeInfo> graph = flowContext.getGraph();
        ProcessHandler processHandler = flowContext.getProcessHandler();
        StatusManager statusManager = flowContext.getStatusManager();
        Set<NodeInfo> successors = graph.successors(nodeInfo);
        CountDownLatch cdl = flowContext.getCdl();
        if (successors.isEmpty()) {
            return;
        }
        String name = nodeInfo.getName();
        for (NodeInfo node : successors) {
            // Already stopped via another path (merge node) — skip it, otherwise
            // the latch would be counted down more than once for the same node.
            if (isStopNode.contains(node.getId())) {
                continue;
            }
            // FIX: the original added nodeInfo.getId() (the parent) here, so the
            // contains() guard above never matched and merge nodes were
            // stopped/counted repeatedly.
            isStopNode.add(node.getId());
            if (processHandler.isRunning(node.getId())) {
                processHandler.stop(node.getId());
            }

            // Renamed from "log" — the original local shadowed the Lombok logger field.
            String msg = String.format("【%s】运行终止，因为节点【%s】运行错误", node.getName(), name);
            logger.warn(msg);
            statusManager.setNodeStatus(flowContext.getFlowInfo().getFlowId(),
                    node.getId(), NodeStatus.STOP);
            stopAfterNode(flowContext, node, isStopNode, logger);
            cdl.countDown();
        }
    }

}
