package avicit.bdp.dds.server.master.runner;

import avicit.bdp.common.utils.SpringApplicationContext;
import avicit.bdp.common.utils.redis.RedisCacheHelper;
import avicit.bdp.dds.dispatch.enums.ExecutionStatus;
import avicit.bdp.dds.dispatch.model.TwoTuple;
import avicit.bdp.core.util.json.JSONUtils;
import avicit.bdp.dds.common.Constants;
import avicit.bdp.dds.common.thread.Stopper;
import avicit.bdp.dds.common.utils.LoggerUtils;
import avicit.bdp.dds.dao.entity.TaskInstance;
import avicit.bdp.dds.remote.command.TaskKillRequestCommand;
import avicit.bdp.dds.remote.utils.Host;
import avicit.bdp.dds.server.master.dispatch.context.ExecutionContext;
import avicit.bdp.dds.server.master.dispatch.enums.ExecutorType;
import avicit.bdp.dds.server.master.dispatch.executor.NettyExecutorManager;
import org.apache.commons.lang3.StringUtils;
import org.slf4j.LoggerFactory;

import java.util.Date;

/**
 * Master-side exec thread for a sparkFlow task: dispatches the task, then
 * polls the spawned process instance until it reaches a terminal state,
 * forwarding stop/pause requests along the way.
 */
public class SparkFlowTaskExecThread extends MasterBaseTaskExecThread {


    /**
     * whether the kill command has already been sent; guards against sending
     * duplicate kill requests while the wait loop keeps polling. default false
     */
    private boolean alreadyKilled = false;

    /** executor used to send commands (e.g. kill) directly to a worker host */
    private NettyExecutorManager nettyExecutorManager;

    /**
     * constructor of MasterBaseTaskExecThread
     *
     * @param taskInstance task instance this thread executes and monitors
     */
    public SparkFlowTaskExecThread(TaskInstance taskInstance) {
        super(taskInstance);
        this.nettyExecutorManager = SpringApplicationContext.getBean(NettyExecutorManager.class);
    }


    /**
     * Dispatch the sparkFlow task, then block until the associated process
     * instance finishes.
     *
     * @return tuple of (finished flag, message). NOTE(review): the flag is
     *         initialized to {@code true} and is still returned as
     *         {@code true} when an exception is caught here, so callers
     *         cannot use it to detect dispatch failures — confirm this
     *         best-effort contract is intended before changing it.
     */
    @Override
    public TwoTuple<Boolean, String> submitWaitComplete() {
        TwoTuple<Boolean, String> result = new TwoTuple<>(true, null);
        try {
            // bind a task-scoped logger so task output is routed to the task's log file
            logger = LoggerFactory.getLogger(LoggerUtils.buildTaskId(LoggerUtils.TASK_LOGGER_INFO_PREFIX,
                    taskInstance.getProcessDefinitionId(),
                    taskInstance.getProcessInstanceId(),
                    taskInstance.getId()));
            String threadLoggerInfoName = String.format(Constants.TASK_LOG_INFO_FORMAT, processService.formatTaskAppId(this.taskInstance));
            Thread.currentThread().setName(threadLoggerInfoName);
            this.dispatchTask(this.taskInstance);
            logger.info("sparkFlow task start");
            processInstance = processService.findProcessInstanceById(taskInstance.getProcessInstanceId());

            // only wait if the process instance is not already in a terminal state
            if (!ExecutionStatus.of(processInstance.getState()).typeIsFinished()) {
                result = waitTaskQuit();
            }

        } catch (Exception e) {
            logger.error("sparkFlow task run exception", e);
        }
        return result;
    }

    /**
     * Poll the process instance until it stops, honoring cancel/stop/pause
     * requests. Returns a tuple whose first element is {@code true} when the
     * wait loop has exited (normally, by missing instance, or by interrupt).
     */
    public TwoTuple<Boolean, String> waitTaskQuit() {
        TwoTuple<Boolean, String> result = new TwoTuple<>(false, null);

        while (Stopper.isRunning()) {
            try {
                if (this.processInstance == null) {
                    logger.error("process instance not exists , master task exec thread exit");
                    result.setFirst(true);
                    return result;
                }
                // task instance add queue , waiting worker to kill
                if (this.cancel || this.processInstance.getState() == ExecutionStatus.READY_STOP.getCode()) {
                    // resolve the host running the sparkFlow task node and send the kill
                    cancelTaskInstance();
                }
                if (processInstance.getState() == ExecutionStatus.READY_PAUSE.getCode()) {
                    // TODO pause is not really implemented yet; mark paused immediately
                    processInstance.setState(ExecutionStatus.PAUSE.getCode());
                    processInstance.setEndTime(new Date());
                    processService.updateProcessInstance(processInstance);
                }

                // refresh state from storage each round
                processInstance = processService.findProcessInstanceById(processInstance.getId());
                if (processInstance.isProcessInstanceStop()) {
                    break;
                }
                Thread.sleep(Constants.SLEEP_TIME_MILLIS);
            } catch (InterruptedException e) {
                // FIX: previously swallowed by the broad catch below, which left
                // the interrupt flag cleared and made Thread.sleep throw again on
                // every iteration (hot error-logging loop, uninterruptible thread).
                // Restore the flag and exit the poll loop instead.
                logger.error("wait task quit interrupted, instance id:{}, task id:{}",
                        processInstance == null ? null : processInstance.getId(), taskInstance.getId());
                Thread.currentThread().interrupt();
                break;
            } catch (Exception e) {
                logger.error("exception", e);
                if (processInstance != null) {
                    logger.error("wait task quit failed, instance id:{}, task id:{}",
                            processInstance.getId(), taskInstance.getId());
                }
            }
        }
        result.setFirst(true);
        return result;
    }

    /**
     * Send a kill request to the worker host that is running this task.
     * Fire-once: {@code alreadyKilled} is set before sending, so the request
     * is never re-sent by later loop iterations even if dispatch throws.
     *
     * @throws Exception if building or sending the kill command fails
     */
    private void cancelTaskInstance() throws Exception {
        if (alreadyKilled) {
            return;
        }
        alreadyKilled = true;

        String taskStr = RedisCacheHelper.getInstance().get("bdp:dds:task:" + taskInstance.getId());
        // the cached entry is the sparkFlow virtual node; it is used mainly to
        // recover the node's execution host so the kill can be routed there
        if (StringUtils.isNotBlank(taskStr)) {
            taskInstance = JSONUtils.parseObject(taskStr, TaskInstance.class);
        }

        TaskKillRequestCommand killCommand = new TaskKillRequestCommand();
        killCommand.setTaskInstanceId(taskInstance.getId());

        ExecutionContext executionContext = new ExecutionContext(killCommand.convert2Command(), ExecutorType.WORKER);

        Host host = Host.of(taskInstance.getHost());
        executionContext.setHost(host);

        nettyExecutorManager.executeDirectly(executionContext);

        logger.info("master kill taskInstance name :{} taskInstance id:{}, host: {}",
                taskInstance.getName(), taskInstance.getId(), taskInstance.getHost());
    }


}
