package cn.spdb.harrier.server.master;

import java.text.ParseException;
import java.time.Duration;
import java.time.LocalDate;
import java.time.LocalDateTime;
import java.time.format.DateTimeFormatter;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.HashMap;
import java.util.List;
import java.util.Optional;
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import javax.annotation.PostConstruct;
import javax.annotation.PreDestroy;

import org.apache.commons.lang3.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import org.springframework.util.CollectionUtils;

import com.alibaba.fastjson.JSON;

import cn.spdb.harrier.common.Constants;
import cn.spdb.harrier.common.enmus.ExecutionStatus;
import cn.spdb.harrier.common.enmus.Message;
import cn.spdb.harrier.common.enmus.RegistryState;
import cn.spdb.harrier.common.enmus.UdsJobType;
import cn.spdb.harrier.common.enmus.alarm.AlarmCode;
import cn.spdb.harrier.common.enmus.alarm.AlarmLevel;
import cn.spdb.harrier.common.model.JobSignal;
import cn.spdb.harrier.common.utils.DateUtils;
import cn.spdb.harrier.common.utils.Host;
import cn.spdb.harrier.common.utils.IPUtils;
import cn.spdb.harrier.common.utils.NameThreadFactory;
import cn.spdb.harrier.common.utils.SnowFlakeBuidID;
import cn.spdb.harrier.common.utils.Stopper;
import cn.spdb.harrier.common.utils.Symbol;
import cn.spdb.harrier.dao.cache.SystemCache;
import cn.spdb.harrier.dao.entity.UdsJob;
import cn.spdb.harrier.dao.entity.UdsJobConfig;
import cn.spdb.harrier.dao.entity.UdsJobDateFrequency;
import cn.spdb.harrier.dao.entity.UdsJobDependency;
import cn.spdb.harrier.dao.entity.UdsJobRecord;
import cn.spdb.harrier.dao.entity.UdsJobSelfSignal;
import cn.spdb.harrier.dao.entity.UdsJobSource;
import cn.spdb.harrier.dao.entity.UdsJobStep;
import cn.spdb.harrier.dao.entity.UdsJobStepRecord;
import cn.spdb.harrier.dao.entity.UdsJobTimeTrigger;
import cn.spdb.harrier.dao.entity.UdsServer;
import cn.spdb.harrier.dao.entity.UdsSystem;
import cn.spdb.harrier.dao.mapper.UdsJobConfigMapper;
import cn.spdb.harrier.dao.mapper.UdsJobDateFrequencyMapper;
import cn.spdb.harrier.dao.mapper.UdsJobDependencyMapper;
import cn.spdb.harrier.dao.mapper.UdsJobMapper;
import cn.spdb.harrier.dao.mapper.UdsJobRecordMapper;
import cn.spdb.harrier.dao.mapper.UdsJobSelfSignalMapper;
import cn.spdb.harrier.dao.mapper.UdsJobSourceMapper;
import cn.spdb.harrier.dao.mapper.UdsJobStepMapper;
import cn.spdb.harrier.dao.mapper.UdsJobStepRecordMapper;
import cn.spdb.harrier.dao.mapper.UdsJobTimeTriggerMapper;
import cn.spdb.harrier.server.entity.JobExecutionContext;
import cn.spdb.harrier.server.entity.JobStepBean;
import cn.spdb.harrier.server.entity.WorkingInfo;
import cn.spdb.harrier.server.master.cache.MasterMangerWorker;
import cn.spdb.harrier.server.master.conf.MasterConfig;
import cn.spdb.harrier.server.master.deal.PlatformDealExecutor;
import cn.spdb.harrier.server.master.dispath.SelectManger;
import cn.spdb.harrier.server.master.rpc.MasterRpc;
import cn.spdb.harrier.server.master.stream.SignalManager;
import cn.spdb.harrier.server.master.weight.WeightManger;
import cn.spdb.harrier.server.utils.AlarmSendUtils;
import cn.spdb.harrier.service.db.DbRegistryService;

@Component
public class MasterManagerService {

	private static Logger logger = LoggerFactory.getLogger(MasterManagerService.class.getSimpleName());

	@Autowired
	private UdsJobMapper jobMapper;

	@Autowired
	private UdsJobConfigMapper jobConfigMapper;

	@Autowired
	private UdsJobDependencyMapper dependencyMapper;

	@Autowired
	private UdsJobStepMapper stepMapper;

	@Autowired
	private UdsJobDateFrequencyMapper frequencyMapper;

	@Autowired
	private UdsJobTimeTriggerMapper triggerMapper;

	@Autowired
	private UdsJobRecordMapper recordMapper;

	@Autowired
	private UdsJobStepRecordMapper stepRecordMapper;

	@Autowired
	private WeightManger weightManger;

	@Autowired
	private UdsJobSelfSignalMapper selfSignalMapper;

	@Autowired
	private MasterMangerWorker mangerWorker;

	@Autowired
	private UdsJobSourceMapper sourceMapper;

	@Autowired
	private SystemCache systemCache;

	@Autowired
	private MasterRpc masterRpc;

	@Autowired
	private SignalManager signalManager;

	private HashMap<String, PlatformDealExecutor> platformThreadMap = new HashMap<String, PlatformDealExecutor>();

	private ScheduledExecutorService scheduledService = Executors.newScheduledThreadPool(
			Runtime.getRuntime().availableProcessors(), new NameThreadFactory(this.getClass().getSimpleName()));

	@Autowired
	private MasterConfig masterConfig;

	@Autowired
	private DbRegistryService dbRegistryService;

	private UdsServer udsServer;

	private SnowFlakeBuidID buildID;

/**
 * Initializes the master service.
 *
 * <p>Invoked automatically after the Spring container starts. Responsibilities:
 * <ul>
 *   <li>Register this master node with the database registry</li>
 *   <li>React to node state changes (CONNECTED/DISCONNECTED/SUSPENDED) by
 *       starting or stopping the platform deal threads and the signal manager</li>
 *   <li>Clean up running-job info when a worker node goes offline or is suspended</li>
 *   <li>Create the unique-ID generator ({@link SnowFlakeBuidID}) seeded with this server's id</li>
 *   <li>Schedule three recurring tasks:
 *     <ul>
 *       <li>{@link #selectPendingOrDispatcherJobOfOverTime} — every minute: handle
 *           jobs stuck in PENDING or DISPATCHER beyond their time limit</li>
 *       <li>{@link #selectScheduleJobDb} — every 10 seconds (after a 60s initial
 *           delay): scan the database for timed jobs to trigger</li>
 *       <li>{@link SignalManager#run()} — every 15 seconds: drain the job-signal queue</li>
 *     </ul>
 *   </li>
 * </ul>
 */
@PostConstruct
public void init() {
    // Build the descriptor for this master node.
    UdsServer udsServer = new UdsServer();
    udsServer.setPort(masterConfig.getListenPort());
    udsServer.setServerName(masterConfig.getServerName());
    udsServer.setServerRoleName(masterConfig.getRoleName());
    udsServer.setServerRoleNameGroup(masterConfig.getRoleGroup());
    udsServer.setNodeClusterType(Constants.THREAD_NAME_MASTER_SERVER);
    udsServer.setIp(IPUtils.getHostIp());

    // Register this node in the DB registry and attach a state-change listener.
    // NOTE(review): the listener reads this.udsServer, which is only assigned
    // after registrydb returns — events fired during registration would compare
    // against null. Presumably the registry defers events; confirm.
    udsServer = dbRegistryService.registrydb(udsServer, event -> {
        // Master-node state transitions.
        if (event.getState().equals(RegistryState.CONNECTED) || event.getState().equals(RegistryState.DISCONNECTED)
                || event.getState().equals(RegistryState.SUSPENDED)) {
            UdsServer udsServerTmp = event.getUdsServer();
            if (udsServerTmp.getNodeClusterType().equals(Constants.THREAD_NAME_MASTER_SERVER)) {
                if (udsServerTmp.equals(this.udsServer) && event.getState().equals(RegistryState.SUSPENDED)) {
                    // This master was suspended: stop platform threads and the signal manager.
                    stopPlatfromsDeal();
                    signalManager.stop();
                } else {
                    // If this node is (or becomes) the active master, start processing;
                    // otherwise make sure processing is stopped.
                    UdsServer us = dbRegistryService.getMaster();
                    if (ObjectUtils.isEmpty(us) || us.equals(this.udsServer)) {
                        startPlatfromsDeal();
                        signalManager.start();
                    } else {
                        stopPlatfromsDeal();
                        signalManager.stop();
                    }
                }
            }
        }

        // Worker-node disconnect/suspend: mark its RUNING jobs as UNKNOWN.
        if (event.getState().equals(RegistryState.DISCONNECTED)
                || event.getState().equals(RegistryState.SUSPENDED)) {
            UdsServer udsServerTmp = event.getUdsServer();
            if (udsServerTmp.getNodeClusterType().equals(Constants.THREAD_NAME_WORKER_SERVER)) {
                WorkingInfo info = deleteWorkingInfo(new Host(udsServerTmp.getIp(), udsServerTmp.getPort()));
                if (!ObjectUtils.isEmpty(info)) {
                    jobMapper.updateJobStatus(ExecutionStatus.UNKNOWN, ExecutionStatus.RUNING, null, null, null,
                            info.getHost().getName());
                }
            }
        }
    });

    // Log the registered master address (ip:port).
    logger.info("Registry Service address:{}", udsServer.getIp() + Symbol.MAO_HAO + udsServer.getPort());
    this.udsServer = udsServer;

    // Unique-ID generator seeded with this server's registry id.
    buildID = new SnowFlakeBuidID(udsServer.getId());

    // Task 1: every minute, handle PENDING/DISPATCHER jobs that exceeded their time limit.
    scheduledService.scheduleWithFixedDelay(() -> {
        try {
            selectPendingOrDispatcherJobOfOverTime();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }, 1, 1, TimeUnit.MINUTES);

    // Task 2: every 10 seconds (60s initial delay), scan the DB for timed jobs to trigger.
    scheduledService.scheduleWithFixedDelay(() -> {
        try {
            selectScheduleJobDb();
        } catch (Exception e) {
            e.printStackTrace();
        }
    }, 60, 10, TimeUnit.SECONDS);

    // Task 3: every 15 seconds, let the signal manager process queued job signals.
    scheduledService.scheduleWithFixedDelay(signalManager, 15, 15, TimeUnit.SECONDS);
}

/**
 * Releases resources before the Spring container shuts down.
 *
 * <p>Stops the scheduled-task service, signals all Stopper-aware loops to
 * terminate, and shuts down every platform deal executor.
 */
@PreDestroy
public void close() {
    logger.info("master thread stop start");
    // Stop accepting further scheduled runs first.
    scheduledService.shutdown();
    // Ask all cooperating threads to stop.
    Stopper.stop();
    // Finally shut down each platform executor.
    for (PlatformDealExecutor executor : platformThreadMap.values()) {
        executor.shutdown();
    }
    logger.info("master thread stop end");
}

/**
 * Starts every registered platform deal executor.
 */
public void startPlatfromsDeal() {
    for (PlatformDealExecutor executor : platformThreadMap.values()) {
        executor.start();
    }
}

/**
 * Stops every registered platform deal executor.
 */
public void stopPlatfromsDeal() {
    for (PlatformDealExecutor executor : platformThreadMap.values()) {
        executor.stop();
    }
}

/**
 * Handles an externally submitted job signal and triggers the job when it
 * passes the status/frequency checks.
 *
 * @param jobSignal the incoming job signal
 * @return true when the job was moved to PENDING; false when the context
 *         could not be created or the checks failed
 */
public Boolean streamJobSignal(JobSignal jobSignal) {
    logger.info("out stream job signal {}", jobSignal);
    JobExecutionContext jobContext = createJobExecutionContext(jobSignal); // build execution context
    if (ObjectUtils.isEmpty(jobContext)) {
        logger.error("create job executioncontext context error,out stream job signal: {}", jobSignal);
        return false;
    }
    boolean pass = checkJobStatusAndFrequency(jobContext, jobSignal); // validate status and frequency
    if (!pass) {
        // BUGFIX: this error used to be logged unconditionally, even when the
        // check passed; it now fires only on an actual failure.
        logger.error("check job status and frequency is not pass,out stream job signal: {}", jobSignal);
        return false;
    }
    conversionPending(jobContext, jobSignal); // move the job to PENDING and enqueue it
    logger.info("job {} conversion pending", jobContext);
    return true;
}

/**
 * Builds a job execution context from an incoming job signal.
 *
 * <p>Looks up the job, its owning system configuration and its job config;
 * when any of the three is missing, logs the problem, raises a DB_ERROR alarm
 * and returns null.
 *
 * @param jobSignal the incoming job signal
 * @return the execution context, or null when required data is absent
 */
private JobExecutionContext createJobExecutionContext(JobSignal jobSignal) {
    // 1) The job itself must exist for this signal.
    Optional<UdsJob> optJob = jobMapper.selectBySignal(jobSignal.getSignal());
    if (!optJob.isPresent()) {
        logger.error("db udssource or udsjob of job is null,out stream job signal {}", jobSignal);
        AlarmSendUtils.sendAlarm(AlarmCode.DB_ERROR, Symbol.XING_HAO, Symbol.XING_HAO, jobSignal.getSignal(),
                Message.SYSTEM_INFORMATION.getMsg(), Message.SIGNAL_JOB_NOT_EXIST.getMsg(), AlarmLevel.H.name(),
                Constants.SYSTEM_STR, jobSignal.getSignal());
        return null;
    }
    UdsJob job = optJob.get();

    // 2) The job's system configuration must exist and be usable.
    UdsSystem system = systemCache.getUdsSystemByUseful(job.getPlatform(), job.getSystems());
    if (ObjectUtils.isEmpty(system)) {
        logger.error("db table uds system column platform is null,out stream job signal: {} , platform: {}",
                jobSignal, job.getPlatform());
        AlarmSendUtils.sendAlarm(AlarmCode.DB_ERROR, job.getPlatform(), job.getSystems(), job.getJob(),
                Message.SYSTEM_INFORMATION.getMsg(), Message.JOB_SYSTEM_NOT_EXIST.getMsg(), AlarmLevel.H.name(),
                Constants.SYSTEM_STR, job.getJob());

        return null;
    }

    // 3) The job's own configuration record must exist.
    Optional<UdsJobConfig> optConfig = jobConfigMapper.selectBySignal(jobSignal.getSignal());
    if (!optConfig.isPresent()) {
        logger.error("db table uds config column platform is null,out stream job signal {}, platform:"
                + "{} systems:{} job:{}", jobSignal, job.getPlatform(), job.getSystems(), job.getJob());
        AlarmSendUtils.sendAlarm(AlarmCode.DB_ERROR, job.getPlatform(), job.getSystems(), job.getJob(),
                Message.SYSTEM_INFORMATION.getMsg(), Message.JOB_CONFIG_NOT_EXIST.getMsg(), AlarmLevel.H.name(),
                Constants.SYSTEM_STR, jobSignal.getSignal(), job.getJob());
        return null;
    }

    JobExecutionContext jobContext = createJobExecutionContext(job, optConfig.get());
    if (system.getUsePlatform()) {
        jobContext.setUsePlatform(true); // honor platform-level configuration
    }
    return jobContext;
}

	/**
 * Wraps a job and its configuration into a fresh execution context for the
 * scheduler to use.
 *
 * @param job       the job entity
 * @param jobConfig the job configuration entity
 * @return a context holding both objects
 */
private JobExecutionContext createJobExecutionContext(UdsJob job, UdsJobConfig jobConfig) {
    JobExecutionContext context = new JobExecutionContext();
    context.setUdsJob(job);
    context.setUdsJobConfig(jobConfig);
    return context;
}

/**
 * Checks whether the job's status and run frequency allow this signal to
 * trigger it.
 *
 * <p>The checks, in order:
 * <ul>
 *   <li>the job must not be a timed (type C) job — those are not signal-driven</li>
 *   <li>READY status triggers immediately; any status other than SUCCESS is rejected</li>
 *   <li>for multi-batch jobs, the signal's batch must be exactly one ahead of
 *       the job's current batch on the same job date</li>
 *   <li>the signal's job date must match the job's (possibly recalculated)
 *       next run date</li>
 * </ul>
 *
 * @param jobContext the job execution context
 * @param jobSignal  the incoming signal
 * @return true when the job may be triggered by this signal
 */
private boolean checkJobStatusAndFrequency(JobExecutionContext jobContext, JobSignal jobSignal) {
    // Timed (type C) jobs are fired by the scheduler, never by external signals.
    if (jobContext.getUdsJobConfig().getJobType().equals(UdsJobType.C.name())) {
        logger.error("job is scheduled job,type is C, job:{}", jobContext);
        AlarmSendUtils.sendAlarm(
                AlarmSendUtils.buildDbError(jobContext, Message.NOT_TIME_JOB, jobContext.getJobNameOrJob()));
        return false;
    }
    if (jobContext.getExecutionStatus().equals(ExecutionStatus.READY)) {
        return true; // READY jobs may be triggered directly
    }
    if (!jobContext.getExecutionStatus().equals(ExecutionStatus.SUCCESS)) {
        // Anything other than READY/SUCCESS (running, failed, ...) blocks a re-trigger.
        logger.warn("job status is not SUCCESS, job:{}", jobContext);
        AlarmSendUtils.sendAlarm(
                AlarmSendUtils.buildJobWarn(jobContext, Message.NOT_STATUS_SUCCESS, jobContext.getJobNameOrJob()));
        return false;
    }

    // Multi-batch path: the signal must carry the immediately-next batch number
    // for the same job date. A gap (>1 ahead) or a repeat (<=0 ahead) is rejected.
    if (jobSignal.getBatch() > 0 && jobContext.getUdsJob().getMultiBatch() > 0) {
        if (jobSignal.getJobDate().equals(jobContext.getJobDate())) {
            if (jobSignal.getBatch() - jobContext.getUdsJob().getMultiBatch() == 1) {
                return true; // consecutive batch: allow
            } else if (jobSignal.getBatch() - jobContext.getUdsJob().getMultiBatch() > 1) {
                AlarmSendUtils
                        .sendAlarm(AlarmSendUtils.buildJobWarn(jobContext, Message.JOB_NOT_NEXT_RUN, jobSignal));
                return false;
            } else {
                AlarmSendUtils
                        .sendAlarm(AlarmSendUtils.buildJobWarn(jobContext, Message.JOB_HAS_BEEN_RUN, jobSignal));
                return false;
            }
        }
    } else {
        // Single-batch path: the signal's batch must equal the job's batch (both 0).
        if (jobSignal.getBatch() - jobContext.getUdsJob().getMultiBatch() != 0) {
            logger.warn("jobSignal batch is error, job batch is 0, job:{}", jobContext);
            AlarmSendUtils.sendAlarm(AlarmSendUtils.buildJobError(jobContext, Message.JOB_BATCH_ERROR,
                    jobContext.getJobNameOrJob(), jobSignal));
            return false;
        }
    }

    // Date check: when the stored next-run date does not match the signal,
    // recalculate it from the frequency config before deciding.
    if (jobContext.getNextJobDate().compareTo(jobSignal.getJobDate()) != 0) {
        LocalDate nextDay = calculateJobNextJobDate(jobContext); // recompute next run date
        if (ObjectUtils.isEmpty(nextDay)) {
            logger.warn("job calculate next job date is error , job:{}", jobContext);
            AlarmSendUtils.sendAlarm(AlarmSendUtils.buildJobWarn(jobContext, Message.JOB_FREQUENTNES_CONFIG_ERROR,
                    jobContext.getJobNameOrJob()));
            return false;
        }
        jobContext.getUdsJob().setNextJobDate(nextDay);
        if (jobContext.getNextJobDate().compareTo(jobSignal.getJobDate()) != 0) {
            // Signal date ahead of next run date => job skipped a run;
            // behind it => this date was already run.
            if (jobContext.getNextJobDate().compareTo(jobSignal.getJobDate()) < 0) {
                AlarmSendUtils
                        .sendAlarm(AlarmSendUtils.buildJobWarn(jobContext, Message.JOB_NOT_NEXT_RUN, jobSignal));
            } else {
                AlarmSendUtils
                        .sendAlarm(AlarmSendUtils.buildJobWarn(jobContext, Message.JOB_HAS_BEEN_RUN, jobSignal));
            }
            logger.warn(
                    "job calculate next job date is not run job date ,  next job date:{}"
                            + "job signal date:{} ,job:{}",
                    jobContext.getNextJobDate(), jobSignal.getJobDate(), jobContext);
            return false;
        }
    }

    // A fresh run date must start at batch <= 1; a larger batch is inconsistent.
    if (jobSignal.getBatch() > 1) {
        logger.warn("job calculate next job date is true but batch is error,job signal batch{},job:{}",
                jobSignal.getBatch(), jobContext);
        AlarmSendUtils.sendAlarm(AlarmSendUtils.buildJobWarn(jobContext, Message.JOB_NOT_NEXT_RUN, jobSignal));
        return false;
    }
    return true;
}

/**
 * Computes the job's next run date based on its configured job type.
 *
 * <p>Type semantics:
 * <ul>
 *   <li>D: daily — current job date plus one day</li>
 *   <li>Y/W/M: next date matching the job's date-frequency cron expressions</li>
 *   <li>C: next date from the time-trigger cron expressions, shifted by the
 *       configured offset day</li>
 * </ul>
 *
 * @param jobContext the job execution context
 * @return the next run date, or null when it cannot be determined
 */
private LocalDate calculateJobNextJobDate(JobExecutionContext jobContext) {
    LocalDate nextDay = null;
    switch (UdsJobType.valueOf(jobContext.getUdsJobConfig().getJobType())) {
        case D: {
            nextDay = jobContext.getUdsJob().getJobDate().plusDays(1); // daily: advance one day
            break;
        }
        case Y:
        case W:
        case M: {
            // Lazily load and cache the date-frequency cron expressions on the context.
            List<String> cronList = jobContext.getCornList();
            if (CollectionUtils.isEmpty(cronList)) {
                List<UdsJobDateFrequency> frequencies = frequencyMapper.select(jobContext.getPlatform(),
                        jobContext.getSystem(), jobContext.getJob());
                cronList = frequencies.stream().map(UdsJobDateFrequency::getCrontab).collect(Collectors.toList());
                jobContext.setCornList(cronList);
            }
            if (!CollectionUtils.isEmpty(cronList)) {
                nextDay = DateUtils.getNextValidTime(cronList.toArray(new String[cronList.size()]),
                        jobContext.getUdsJob().getJobDate()); // next date matching any cron
            }
            break;
        }
        case C: {
            // Lazily load and cache the time-trigger cron expressions on the context.
            List<String> cronList = jobContext.getTimeCornList();
            if (CollectionUtils.isEmpty(cronList)) {
                List<UdsJobTimeTrigger> triggers = triggerMapper.select(jobContext.getPlatform(),
                        jobContext.getSystem(), jobContext.getJob());
                cronList = triggers.stream().map(UdsJobTimeTrigger::getCrontab).collect(Collectors.toList());
                jobContext.setTimeCornList(cronList);
            }
            if (!CollectionUtils.isEmpty(cronList)) {
                // Evaluate the cron against the job date shifted back by the offset,
                // then re-apply the offset to the result.
                nextDay = DateUtils.getNextValidTime(cronList.toArray(new String[cronList.size()]),
                        jobContext.getUdsJob().getJobDate().plusDays(-1 * jobContext.getUdsJobConfig().getOffsetDay()));
                nextDay = nextDay.plusDays(jobContext.getUdsJobConfig().getOffsetDay());
            }
            // BUGFIX: missing break — case C previously fell through into default
            // (harmless today since default only breaks, but fragile).
            break;
        }
        default:
            break;
    }
    // nextDay is already null when no rule produced a date.
    return nextDay;
}

/**
 * Copies the signal's global parameters, environment variables, batch number
 * and job date into the context, then moves the job to PENDING.
 *
 * @param jobContext the job execution context
 * @param jobSignal  the triggering signal
 * @return always true, indicating the signal was processed
 */
public boolean conversionPending(JobExecutionContext jobContext, JobSignal jobSignal) {
    jobContext.setGloParams(jobSignal.getParams());  // global parameters
    jobContext.setGloEnvs(jobSignal.getEnvs());      // global environment variables
    jobContext.setBatch(jobSignal.getBatch());       // batch number
    jobContext.setJobDate(jobSignal.getJobDate());   // run date
    conversionPending(jobContext);                   // transition to PENDING
    return true;
}

/**
 * Moves the job to PENDING and persists the change.
 *
 * <p>Sets the pending timestamp, increments the trigger counter, writes the
 * job row back to the database, and enqueues the context for processing.
 *
 * @param jobContext the job execution context
 */
public void conversionPending(JobExecutionContext jobContext) {
    UdsJob udsJob = jobContext.getUdsJob();
    jobContext.setExecutionStatus(ExecutionStatus.PENDING);
    udsJob.setPendingTime(LocalDateTime.now());
    udsJob.setNumTimes(udsJob.getNumTimes() + 1); // one more trigger
    jobMapper.updateByPrimaryKeySelective(udsJob);
    logger.info("job conversion pending,job:{}", jobContext);
    addPendingQueue(jobContext); // hand off to the pending queue
}

/**
 * Updates job status by server name, transitioning rows currently in
 * {@code oldStatus} to {@code newStatus}.
 *
 * @param newStatus  status to set
 * @param oldStatus  status the rows must currently have
 * @param serverName server whose jobs are updated
 */
public void updateJobStatus(ExecutionStatus newStatus, ExecutionStatus oldStatus, String serverName) {
    jobMapper.updateJobStatus(newStatus, oldStatus, null, null, null, serverName);
    logger.info("job update dispatcher,serverName:{} oldstatus:{} newStatus:{}", serverName, oldStatus, newStatus);
}

/**
 * Updates the status of one job identified by platform/system/job name,
 * regardless of its current status.
 *
 * @param newStatus status to set
 * @param platform  platform name
 * @param systems   system name
 * @param job       job name
 */
public void updateJobStatus(ExecutionStatus newStatus, String platform, String systems, String job) {
    jobMapper.updateJobStatus(newStatus, null, platform, systems, job, null);
    logger.info("job update dispatcher,platform:{} system:{} job:{} newStatus:{}", platform, systems, job,
            newStatus);
}

/**
 * Moves the job to DISPATCHER (being dispatched), stamps the dispatch time,
 * and persists the change.
 *
 * @param jobContext the job execution context
 */
public void conversionDispatcher(JobExecutionContext jobContext) {
    UdsJob udsJob = jobContext.getUdsJob();
    udsJob.setDispatcherTime(LocalDateTime.now());
    jobContext.setExecutionStatus(ExecutionStatus.DISPATCHER);
    jobMapper.updateByPrimaryKeySelective(udsJob);
    logger.info("job conversion dispatcher,job:{}", jobContext);
}

/**
 * Checks whether the current time falls inside the job's configured time window.
 *
 * @param jobConfig the job configuration carrying the time-window definition
 * @return true when the job may run now
 */
public boolean checkJobWinds(UdsJobConfig jobConfig) {
    return DateUtils.isTimeWindowRang(jobConfig.getTimeWindow());
}

/**
 * Checks whether every job this job depends on is satisfied.
 *
 * <p>A dependency is considered satisfied when:
 * <ul>
 *   <li>it is in READY status, or</li>
 *   <li>(single-batch dependency) its next run date is already past the
 *       current job's date, or</li>
 *   <li>(multi-batch dependency) its batch has reached the required batch
 *       number — from the dependency table when this job is single-batch,
 *       or this job's own batch otherwise — with the matching date condition</li>
 * </ul>
 *
 * @param job the job whose dependencies are checked
 * @return true when all dependencies are satisfied
 */
public boolean checkDependency(UdsJob job) {
    List<UdsJob> depencyList = jobMapper.getDependency(job.getPlatform(), job.getSystems(), job.getJob());
    for (UdsJob depJob : depencyList) {
        if (depJob.getLastStatus().equals(ExecutionStatus.READY.name())) {
            continue; // READY dependency never blocks
        }
        if (depJob.getMultiBatch() == 0) {
            // Single-batch dependency: satisfied once its next run date has
            // moved past this job's date (i.e. it already ran for this date).
            if (depJob.getNextJobDate().compareTo(job.getJobDate()) > 0) {
                continue;
            }
        } else {
            if (job.getMultiBatch() == 0) {
                // This job is single-batch but depends on a multi-batch job:
                // the required batch comes from the dependency table.
                Optional<UdsJobDependency> optDeo = dependencyMapper.getDependencyBatch(job.getPlatform(),
                        job.getSystems(), job.getJob(), depJob.getPlatform(), depJob.getSystems(), depJob.getJob());
                if (!optDeo.isPresent()) {
                    continue; // no batch requirement recorded — treat as satisfied
                }
                int depBatch = optDeo.get().getDepBatch();
                // Required batch passed (or reached with SUCCESS) and date advanced.
                if (depJob.getMultiBatch() > depBatch || (depJob.getMultiBatch() == depBatch
                        && depJob.getLastStatus().equals(ExecutionStatus.SUCCESS.name()))) {
                    if (depJob.getNextJobDate().compareTo(job.getJobDate()) > 0) {
                        continue;
                    }
                }
                // Batch behind but the dependency is already on a later date.
                if (depJob.getMultiBatch() < depBatch && depJob.getJobDate().compareTo(job.getJobDate()) > 0) {
                    continue;
                }
            } else {
                // Both jobs are multi-batch: compare against this job's own batch.
                if (depJob.getMultiBatch() > job.getMultiBatch() || (depJob.getMultiBatch() == job.getMultiBatch()
                        && depJob.getLastStatus().equals(ExecutionStatus.SUCCESS.name()))) {
                    if (depJob.getNextJobDate().compareTo(job.getJobDate()) > 0) {
                        continue;
                    }
                }
                if (depJob.getMultiBatch() < job.getMultiBatch()
                        && depJob.getJobDate().compareTo(job.getJobDate()) > 0) {
                    continue;
                }
            }
        }
        return false; // this dependency is not yet satisfied
    }
    return true; // all dependencies satisfied
}

/**
 * Increments the job load tracked for the given host.
 *
 * @param host       the worker host
 * @param jobContext the job being accounted for
 */
public void incrementWeightAndJob(Host host, JobExecutionContext jobContext) {
    WorkingInfo workingInfo = mangerWorker.getWork(host);
    // BUGFIX/consistency: decrementWeightAndJob null-guards getWork(host); the
    // increment path previously threw an NPE for an unknown host. Guard and log
    // instead so accounting failures are visible but non-fatal.
    if (ObjectUtils.isNotEmpty(workingInfo)) {
        workingInfo.incrementJob(jobContext);
    } else {
        logger.warn("working info not found for host {}, job {}", host, jobContext);
    }
}

/**
 * Decrements the job load tracked for the given host, if the host is known.
 *
 * @param host       the worker host
 * @param jobContext the job being accounted for
 */
public void decrementWeightAndJob(Host host, JobExecutionContext jobContext) {
    WorkingInfo info = mangerWorker.getWork(host);
    if (!ObjectUtils.isEmpty(info)) {
        info.decrementJob(jobContext);
    }
}

/**
 * Picks a worker host for dispatching the given job.
 *
 * <p>Selection pipeline: check the platform/system-level running-job cap,
 * filter out workers already at their per-node job maximum, apply the weight
 * manager's checks, then let the select manager choose the final host.
 *
 * @param jobContext the job execution context
 * @return the selected host, or null when no node is currently eligible
 */
public Host passDispatcherGetHost(JobExecutionContext jobContext) {
    // Look up the system configuration for this job's platform/system.
    // NOTE(review): unlike createJobExecutionContext, udsSystem is dereferenced
    // here without a null check — an unusable system would NPE. Confirm callers
    // guarantee the system exists.
    UdsSystem udsSystem = systemCache.getUdsSystemByUseful(jobContext.getPlatform(), jobContext.getSystem());
    if (udsSystem.getUsePlatform()) {
        jobContext.setUsePlatform(true); // honor platform-level configuration
    }

    // Current number of running jobs for this platform/system.
    int jobNum = mangerWorker.getJobNumByPlatformAndSystem(udsSystem.getPlatform(), udsSystem.getSystems());

    // System-wide cap reached: nothing can be dispatched right now.
    if (udsSystem.getMaxRunJob() <= jobNum) {
        logger.info("udsSystem:{}_{} is max job num: {} < run job num: {}", udsSystem.getPlatform(),
                udsSystem.getSystems(), udsSystem.getMaxRunJob(), jobNum);
        return null;
    }

    // Candidate workers: drop any node already at its own job maximum.
    Collection<WorkingInfo> sourceWorks = mangerWorker.getWorkList();
    sourceWorks = sourceWorks.stream()
            .filter(predicate -> predicate.getJobNum().get() < predicate.getJobNumMax())
            .collect(Collectors.toList());

    if (CollectionUtils.isEmpty(sourceWorks)) {
        logger.info("server run is max, not find host");
        return null; // every worker is saturated
    }

    // Narrow the candidates further via the weight manager's rules.
    sourceWorks = weightManger.check(jobContext, sourceWorks);
    if (CollectionUtils.isEmpty(sourceWorks)) {
        logger.info("job is check weight is not pass, job {}", jobContext);
        return null; // weight check rejected all candidates
    }

    // Final pick among the remaining candidates.
    Host host = SelectManger.getInstance().select(sourceWorks, udsSystem);
    return host;
}

/**
 * Sends the job to its assigned host for execution via RPC.
 *
 * @param jobContext the job execution context (must carry a target host)
 * @return true when the dispatch call succeeded; false when no host is set
 */
public Boolean sendJobExecutionContext(JobExecutionContext jobContext) {
    Host target = jobContext.getHost();
    if (ObjectUtils.isEmpty(target)) {
        // No host assigned — nothing to dispatch to.
        return false;
    }
    // Dispatch through the RPC client bound to the target worker.
    return masterRpc.getWorkClient(target).dispathcer(jobContext);
}

/**
 * Loads all steps of a job and converts them to {@link JobStepBean}s.
 *
 * @param platform platform name
 * @param system   system name
 * @param job      job name
 * @return the job's steps as beans, in database order
 */
public List<JobStepBean> getJobStepList(String platform, String system, String job) {
    return stepMapper.selectJobStepList(platform, system, job).stream()
            .map(this::build) // convert each step entity to its bean form
            .collect(Collectors.toList());
}

/**
 * Converts a {@link UdsJobStep} database entity into a {@link JobStepBean}.
 *
 * @param step the step entity from the database
 * @return the populated bean
 */
private JobStepBean build(UdsJobStep step) {
    JobStepBean bean = new JobStepBean();
    bean.setStepNum(step.getStepNum());          // step number
    bean.setStepType(step.getStepType());        // step type
    bean.setCmd(step.getOperCmd());              // operation command
    bean.setPararmeter(step.getParameter());     // parameters
    bean.setEnvs(step.getEnvironments());        // environment variables
    bean.setWorkdir(step.getWorkDir());          // working directory
    bean.setStepPath(step.getScriptPath());      // script path
    bean.setUpdateTime(step.getUpdateTime());    // last update time
    return bean;
}

/**
 * Marks the job SUCCESS, stamps the end time, computes the next run date,
 * and persists the change.
 *
 * @param jobContext the job execution context
 * @return always true
 */
public boolean conversionSuccess(JobExecutionContext jobContext) {
    UdsJob udsJob = jobContext.getUdsJob();
    jobContext.setExecutionStatus(ExecutionStatus.SUCCESS);
    udsJob.setEndTime(LocalDateTime.now());
    udsJob.setNextJobDate(calculateJobNextJobDate(jobContext)); // schedule the next run
    jobMapper.updateByPrimaryKeySelective(udsJob);
    logger.info("job conversion success,job:{}", jobContext);
    return true;
}

/**
 * Post-success follow-up for a job:
 * <ul>
 *   <li>queues jobs that depend on this one</li>
 *   <li>queues this job's own self-stream continuation</li>
 *   <li>signals the platform-level dispatcher to re-evaluate</li>
 * </ul>
 *
 * @param jobContext the execution context of the job that succeeded
 * @return always true
 */
public boolean conversionSuccessAfter(JobExecutionContext jobContext) {
    addDepStreamJobDealQueue(jobContext); // enqueue downstream (dependent) jobs
    addSelfStreamJobDealQueue(jobContext); // enqueue this job's self-stream run
    addDispatcherDealSignal(jobContext.getPlatform()); // wake the platform dispatcher
    return true;
}


/**
 * 将任务状态转换为失败（FAILURE），并记录结束时间。
 *
 * <p>同时记录日志并发送告警通知。
 *
 * @param jobContext 当前任务的执行上下文
 * @return 始终返回 true，表示处理成功
 */
public boolean conversionFailure(JobExecutionContext jobContext) {
    jobContext.setExecutionStatus(ExecutionStatus.FAILURE); // 设置任务状态为 FAILURE
    jobContext.getUdsJob().setEndTime(LocalDateTime.now()); // 记录结束时间
    jobMapper.updateByPrimaryKeySelective(jobContext.getUdsJob()); // 更新数据库中的任务信息
    logger.info("job conversion failure,job:{}", jobContext);
    AlarmSendUtils.sendAlarm(
            AlarmSendUtils.buildJobError(jobContext, Message.JOB_STATUS_ERROR, jobContext.getJobNameOrJob()));
    return true;
}


/**
 * 将任务状态转换为运行中（RUNING），并更新数据库。
 *
 * <p>如果更新成功且最近一次更新未超过30秒且服务器名称一致，则返回 true。
 *
 * @param jobContext 当前任务的执行上下文
 * @return 是否成功转换状态
 */
public Boolean conversionRuning(JobExecutionContext jobContext) {
    jobContext.getUdsJob().setStartTime(LocalDateTime.now()); // 设置开始时间
    jobContext.setExecutionStatus(ExecutionStatus.RUNING); // 设置任务状态为 RUNING
    logger.info("job conversion runing,job:{}", jobContext);
    jobMapper.updateByPrimaryKeySelective(jobContext.getUdsJob(), ExecutionStatus.DISPATCHER); // 更新任务状态

    Optional<UdsJob> optional = jobMapper.selectByPrimaryKey(jobContext.getUdsJob().getId());
    if (optional.isPresent()
            && Duration.between(optional.get().getUpdateTime(), LocalDateTime.now()).getSeconds() < 30
            && optional.get().getServerName().equals(jobContext.getUdsJob().getServerName())) {
        return true; // 如果更新时间在30秒内且服务器一致，返回 true
    }
    return false;
}


/**
 * 自流任务处理逻辑。
 *
 * <p>根据配置判断是否启用自流，并决定是否将任务重新放入待处理队列。
 *
 * @param jobContext 当前任务的执行上下文
 */
public void selfStream(JobExecutionContext jobContext) {
    if (!jobContext.getUdsJobConfig().getCheckStreamSelf()) {
        return; // self-stream checking disabled for this job; nothing to do
    }
    logger.debug(jobContext.getUdsJob().getId() + ":进入selfStream");
    // A row in the source table means this job is driven by external signals.
    Optional<UdsJobSource> optional = sourceMapper.selectOne(jobContext.getPlatform(), jobContext.getSystem(),
            jobContext.getJob());
    if (optional.isPresent()) {
        // Signal-driven path: look for a stored self-signal for the next date/batch.
        LocalDate jobDate = jobContext.getUdsJob().getJobDate();
        Integer batch = 0;
        Optional<UdsJobSelfSignal> optSelf = null;
        if (jobContext.getUdsJob().getMultiBatch() == 0) {
            jobDate = jobContext.getUdsJob().getNextJobDate(); // single-batch job: roll straight to the next job date
            optSelf = selfSignalMapper.selectOneSignal(jobContext.getPlatform(), jobContext.getSystem(),
                    jobContext.getJob(), jobDate, batch);
        } else {
            batch = jobContext.getUdsJob().getMultiBatch() + 1; // first try the next batch on the same date
            optSelf = selfSignalMapper.selectOneSignal(jobContext.getPlatform(), jobContext.getSystem(),
                    jobContext.getJob(), jobDate, batch);
            if (!optSelf.isPresent()) {
                batch = 1;
                jobDate = jobContext.getUdsJob().getNextJobDate(); // fall back: batch 1 on the next job date
                optSelf = selfSignalMapper.selectOneSignal(jobContext.getPlatform(), jobContext.getSystem(),
                        jobContext.getJob(), jobDate, batch);
            }
        }
        if (optSelf.isPresent()) {
            // A matching signal exists: advance the job to the new date/batch and queue it.
            jobContext.getUdsJob().setMultiBatch(batch);
            jobContext.getUdsJob().setJobDate(jobDate);
            logger.info("job self Stream ,job:{}", jobContext);
            conversionPending(jobContext); // transition to PENDING
        }
    } else {
        // Dependency-driven path: advance date/batch and re-check dependencies.
        // NOTE(review): isStream is assigned in every branch below but never read
        // afterwards — confirm whether it was meant to gate the rest of this flow.
        boolean isStream;
        if (jobContext.getUdsJob().getMultiBatch() == 0) {
            jobContext.getUdsJob().setJobDate(jobContext.getUdsJob().getNextJobDate()); // use the next job date
            isStream = checkDependency(jobContext.getUdsJob());
        } else {
            int tmpBatch = jobContext.getUdsJob().getMultiBatch();
            jobContext.getUdsJob().setMultiBatch(tmpBatch + 1); // advance to the next batch
            if (checkDependency(jobContext.getUdsJob())) {
                isStream = true;
            } else {
                jobContext.getUdsJob().setJobDate(jobContext.getUdsJob().getNextJobDate()); // next job date
                jobContext.getUdsJob().setMultiBatch(1); // reset batch to 1
                isStream = checkDependency(jobContext.getUdsJob());
            }
        }

        // Date-consistency guard: the job date must have caught up to nextJobDate.
        LocalDate localDate = jobContext.getUdsJob().getJobDate();
        if (localDate.compareTo(jobContext.getUdsJob().getNextJobDate()) < 0) {
            LocalDate nextDay = calculateJobNextJobDate(jobContext); // recompute the next run date
            if (ObjectUtils.isEmpty(nextDay)) {
                logger.warn("job calculate next job date is error , job:{}", jobContext);
                AlarmSendUtils.sendAlarm(AlarmSendUtils.buildJobWarn(jobContext,
                        Message.JOB_FREQUENTNES_CONFIG_ERROR, jobContext.getJobNameOrJob()));
                return;
            }
            if (!nextDay.equals(jobContext.getUdsJob().getNextJobDate())) {
                UdsJob record = new UdsJob();
                record.setId(jobContext.getUdsJob().getId());
                record.setNextJobDate(nextDay);
                jobMapper.updateByPrimaryKeySelective(record); // persist the corrected next job date
                jobContext.getUdsJob().setNextJobDate(nextDay);
            }
            if (localDate.compareTo(jobContext.getUdsJob().getNextJobDate()) < 0) {
                return; // still behind the (corrected) next date: nothing to trigger yet
            }
        }

        jobContext.getUdsJob().setJobDate(jobContext.getUdsJob().getNextJobDate());
        // NOTE(review): the two lines below assign the context's own values back to
        // itself (no-ops). depStream copies these fields from a *parent* context —
        // confirm what source was intended here.
        jobContext.setGloEnvs(jobContext.getGloEnvs());
        jobContext.setGloParams(jobContext.getGloParams());
        if (!checkDependency(jobContext.getUdsJob())) {
            return;
        }

        logger.info("job dep Stream ,job:{}", jobContext);
        // Move to PENDING, stamp the pending time, bump the trigger count, persist.
        jobContext.setExecutionStatus(ExecutionStatus.PENDING);
        jobContext.getUdsJob().setPendingTime(LocalDateTime.now());
        jobContext.getUdsJob().setNumTimes(jobContext.getUdsJob().getNumTimes() + 1);
        jobMapper.updateByPrimaryKeySelective(jobContext.getUdsJob());

        if (!checkJobWinds(jobContext.getUdsJobConfig())) { // outside the execution time window
            return;
        }

        if (jobContext.getUdsJobConfig().getVirtualEnable()) { // virtual jobs complete immediately
            jobContext.getUdsJob().setDispatcherTime(LocalDateTime.now());
            jobContext.getUdsJob().setNumTimes(jobContext.getUdsJob().getNumTimes() + 1);
            jobContext.getUdsJob().setServerName(""); // virtual jobs run on no host
            jobContext.getUdsJob().setStartTime(LocalDateTime.now());
            logger.info("job is virtual conversion success,jog:{}", jobContext);
            conversionSuccess(jobContext); // mark the virtual job successful
            conversionSuccessAfter(jobContext); // fan out downstream triggers
            insertJobRecord(jobContext); // archive an execution record
        } else {
            conversionDispatcher(jobContext); // hand the job to the dispatcher
            addDispatcherDealQueue(jobContext); // enqueue for dispatching
        }
    }
}

/**
 * 处理当前任务的依赖任务流，触发符合条件的依赖任务执行。
 *
 * <p>该方法的主要作用是：当某个任务成功完成后，根据其定义的依赖关系，
 * 自动触发所有依赖于该任务的任务。该方法会处理批次、执行日期、依赖状态等逻辑，
 * 确保只有满足条件的任务才会被调度执行。
 *
 * <h3>主要流程如下：</h3>
 * <ol>
 *   <li>获取当前任务的批次号</li>
 *   <li>查询所有依赖于当前任务的目标任务配置</li>
 *   <li>遍历每个目标任务配置并处理：
 *     <ul>
 *       <li>跳过源任务（即由外部信号触发的任务）</li>
 *       <li>检查目标任务是否存在</li>
 *       <li>构建目标任务的执行上下文</li>
 *       <li>根据状态决定是否加入待处理队列或直接跳过</li>
 *       <li>处理批次传递逻辑</li>
 *       <li>计算执行日期并继承父任务的环境变量和参数</li>
 *       <li>检查目标任务的所有依赖是否满足</li>
 *       <li>如果满足条件，则将目标任务设为 PENDING 并加入调度队列</li>
 *     </ul>
 *   </li>
 * </ol>
 *
 * @param jobContext 当前任务的执行上下文，包含任务信息、状态、批次、执行时间等
 */
public void depStream(JobExecutionContext jobContext) {
    // Batch number of the job that just completed.
    Integer multiBatch = jobContext.getUdsJob().getMultiBatch();

    // All job configs that declare a dependency on the completed job.
    List<UdsJobConfig> udsjobList = jobConfigMapper.selectByDepAndCondition(jobContext.getPlatform(),
            jobContext.getSystem(), jobContext.getJob(), multiBatch);

    // Evaluate every candidate downstream job.
    for (UdsJobConfig targetJobConfig : udsjobList) {
        // Signal-driven (source) jobs are triggered elsewhere — skip them here.
        Optional<UdsJobSource> optional = sourceMapper.selectOne(targetJobConfig.getPlatform(),
                targetJobConfig.getSystems(), targetJobConfig.getJob());

        if (optional.isPresent()) {
            continue; // source job: not driven by dependencies
        }

        // Load the live row of the downstream job.
        UdsJob targetJob = jobMapper
                .selectOne(targetJobConfig.getPlatform(), targetJobConfig.getSystems(), targetJobConfig.getJob())
                .orElse(null);

        if (targetJob == null) {
            // Config exists but the job row is missing: raise an alarm and skip.
            AlarmSendUtils.sendAlarm(AlarmCode.DB_ERROR, targetJobConfig.getPlatform(),
                    targetJobConfig.getSystems(), targetJobConfig.getJob(), Message.SYSTEM_INFORMATION.getMsg(),
                    Message.JOB_NOT_EXIST.getMsg(), AlarmLevel.H.name(), Constants.SYSTEM_STR,
                    targetJobConfig.getJob());
            continue;
        }

        // Build an execution context for the downstream job.
        JobExecutionContext targetJobContext = createJobExecutionContext(targetJob, targetJobConfig);

        // Already PENDING: just re-queue it for processing.
        if (targetJob.getLastStatus().compareTo(ExecutionStatus.PENDING.name()) == 0) {
            addPendingQueue(targetJobContext);
            continue;
        }

        // Only SUCCESS or READY jobs may be (re)triggered by a dependency.
        if (!(targetJob.getLastStatus().compareTo(ExecutionStatus.SUCCESS.name()) == 0
                || targetJob.getLastStatus().compareTo(ExecutionStatus.READY.name()) == 0)) {
            continue;
        }

        // A READY job has never run: compute its next run date first.
        if (targetJob.getLastStatus().compareTo(ExecutionStatus.READY.name()) == 0) {
            LocalDate nextDay = calculateJobNextJobDate(targetJobContext);
            if (ObjectUtils.isEmpty(nextDay)) {
                logger.warn("job calculate next job date is error , job:{}", targetJobContext);
                AlarmSendUtils.sendAlarm(AlarmSendUtils.buildJobWarn(targetJobContext,
                        Message.JOB_FREQUENTNES_CONFIG_ERROR, targetJobContext.getJobNameOrJob()));
                continue;
            }
            targetJobContext.getUdsJob().setNextJobDate(nextDay);
        }

        // Batch-propagation rules between upstream and downstream.
        Integer targetBatch = targetJob.getMultiBatch();

        // Single-batch upstream feeding a multi-batch target: defer to self-trigger logic.
        if (multiBatch == 0 && targetBatch > 0) {
            selfStream(targetJobContext);
            continue;
        } else if (multiBatch == 0 && targetBatch == 0) {
            // Both single-batch: no batch bookkeeping needed.
        } else if (multiBatch > 0 && targetBatch > 0) {
            // Upstream batch must divide evenly by the configured conversion ratio.
            if (multiBatch % targetJobConfig.getBatchConversion() == 0) {
                int batch = multiBatch / targetJobConfig.getBatchConversion();
                if (batch - targetBatch == 1) {
                    targetJob.setMultiBatch(batch); // advance the target's batch
                    // NOTE(review): this mutation is followed by `continue` below, so it
                    // is never persisted or acted on in this iteration — confirm intent.
                }
            }
            continue;
        } else if (multiBatch > 0 && targetBatch == 0) {
            // Multi-batch upstream, single-batch target: only a specific upstream
            // batch (depBatch) may trigger the target.
            Optional<UdsJobDependency> optDeo = dependencyMapper.getDependencyBatch(targetJob.getPlatform(),
                    targetJob.getSystems(), targetJob.getJob(), jobContext.getPlatform(), jobContext.getSystem(),
                    jobContext.getJob());

            if (!optDeo.isPresent()) {
                continue;
            }

            int depBatch = optDeo.get().getDepBatch();
            if (multiBatch != depBatch) {
                continue;
            }
        }

        // Date-consistency guard: the upstream's job date must have reached the
        // target's next run date.
        LocalDate localDate = jobContext.getUdsJob().getJobDate();

        if (localDate.compareTo(targetJob.getNextJobDate()) < 0) {
            LocalDate nextDay = calculateJobNextJobDate(targetJobContext);
            if (ObjectUtils.isEmpty(nextDay)) {
                logger.warn("job calculate next job date is error , job:{}", targetJobContext);
                AlarmSendUtils.sendAlarm(AlarmSendUtils.buildJobWarn(targetJobContext,
                        Message.JOB_FREQUENTNES_CONFIG_ERROR, targetJobContext.getJobNameOrJob()));
                continue;
            }

            if (!nextDay.equals(targetJobContext.getUdsJob().getNextJobDate())) {
                // Persist the corrected next run date.
                UdsJob record = new UdsJob();
                record.setId(targetJobContext.getUdsJob().getId());
                record.setNextJobDate(nextDay);
                jobMapper.updateByPrimaryKeySelective(record);
                targetJobContext.getUdsJob().setNextJobDate(nextDay);
            }

            if (localDate.compareTo(targetJob.getNextJobDate()) < 0) {
                continue; // upstream still behind the target's schedule
            }
        }

        // Move the target onto its next valid run date.
        targetJobContext.getUdsJob().setJobDate(targetJobContext.getUdsJob().getNextJobDate());

        // Inherit environment variables and global parameters from the upstream job.
        targetJobContext.setGloEnvs(jobContext.getGloEnvs());
        targetJobContext.setGloParams(jobContext.getGloParams());

        // All of the target's declared dependencies must be satisfied.
        if (!checkDependency(targetJobContext.getUdsJob())) {
            continue;
        }

        logger.info("job dep Stream ,job:{}", targetJobContext);

        // Transition the target to PENDING and persist.
        targetJobContext.setExecutionStatus(ExecutionStatus.PENDING);
        targetJobContext.getUdsJob().setPendingTime(LocalDateTime.now());
        targetJobContext.getUdsJob().setNumTimes(targetJobContext.getUdsJob().getNumTimes() + 1);
        jobMapper.updateByPrimaryKeySelective(targetJobContext.getUdsJob());

        // Respect the target's execution time window.
        if (!checkJobWinds(targetJobContext.getUdsJobConfig())) {
            continue;
        }

        // Virtual targets complete immediately; real ones go to the dispatcher.
        if (targetJobContext.getUdsJobConfig().getVirtualEnable()) {
            targetJobContext.getUdsJob().setDispatcherTime(LocalDateTime.now());
            // NOTE(review): numTimes is taken from the *upstream* jobContext here,
            // unlike selfStream which bumps the job's own counter — looks like a
            // copy-paste slip; confirm against the intended semantics.
            targetJobContext.getUdsJob().setNumTimes(jobContext.getUdsJob().getNumTimes() + 1);
            targetJobContext.getUdsJob().setServerName("");
            targetJobContext.getUdsJob().setStartTime(LocalDateTime.now());
            logger.info("job is virtual conversion success,jog:{}", targetJobContext);
            conversionSuccess(targetJobContext);
            conversionSuccessAfter(targetJobContext);
            insertJobRecord(targetJobContext);
        } else {
            // Hand off to the dispatcher and enqueue.
            conversionDispatcher(targetJobContext);
            addDispatcherDealQueue(targetJobContext);
        }
    }
}

/*  Purpose: enqueue a job onto its platform's pending queue.
    Logic:
    - lazily create and start the platform's PlatformDealExecutor on first use;
    - hand the job to that executor's pending queue.
    NOTE(review): the get/put on platformThreadMap is not atomic; if this can run
    concurrently for the same platform, two executors could be created — confirm
    the caller threading model or use a concurrent computeIfAbsent. */
	public void addPendingQueue(JobExecutionContext jobContext) {
		PlatformDealExecutor platformDealExecutor = platformThreadMap.get(jobContext.getPlatform());
		if (platformDealExecutor == null) {
			platformDealExecutor = new PlatformDealExecutor(this);
			platformDealExecutor.setName(jobContext.getPlatform());
			platformDealExecutor.execute();
			platformThreadMap.put(jobContext.getPlatform(), platformDealExecutor);
		}
		platformDealExecutor.addPendingQueue(jobContext);
		// Parameterized logging avoids building the message when DEBUG is disabled.
		logger.debug("{}:进入Pending", jobContext.getUdsJob().getId());
	}


/*  Purpose: enqueue a job onto its platform's dispatcher queue.
    Logic:
    - lazily create and start the platform's PlatformDealExecutor on first use;
    - hand the job to that executor's dispatcher queue. */
	public void addDispatcherDealQueue(JobExecutionContext jobContext) {
		PlatformDealExecutor platformDealExecutor = platformThreadMap.get(jobContext.getPlatform());
		if (platformDealExecutor == null) {
			platformDealExecutor = new PlatformDealExecutor(this);
			platformDealExecutor.setName(jobContext.getPlatform());
			platformDealExecutor.execute();
			platformThreadMap.put(jobContext.getPlatform(), platformDealExecutor);
		}
		platformDealExecutor.addDispatcherDealQueue(jobContext);
		// Parameterized logging; also fixes the "Distapcher" typo in the message.
		logger.debug("{}:进入Dispatcher", jobContext.getUdsJob().getId());
	}

/*  Purpose: enqueue a self-triggered job onto its platform's self-stream queue.
    Logic:
    - lazily create and start the platform's PlatformDealExecutor on first use;
    - hand the job to that executor's self-stream queue. */
	public void addSelfStreamJobDealQueue(JobExecutionContext jobContext) {
		PlatformDealExecutor platformDealExecutor = platformThreadMap.get(jobContext.getPlatform());
		if (platformDealExecutor == null) {
			platformDealExecutor = new PlatformDealExecutor(this);
			platformDealExecutor.setName(jobContext.getPlatform());
			platformDealExecutor.execute();
			platformThreadMap.put(jobContext.getPlatform(), platformDealExecutor);
		}
		// Parameterized logging avoids building the message when DEBUG is disabled.
		logger.debug("{}:进入selfStream", jobContext.getUdsJob().getId());
		platformDealExecutor.addSelfStreamJobDealQueue(jobContext);
	}


/*  Purpose: send a dispatch-ready signal to the given platform.
    Logic: if the platform's executor already exists, forward the signal;
    otherwise do nothing (no executor is lazily created here, unlike the
    queueing methods). */
	public void addDispatcherDealSignal(String platform) {
		// Fixed param-name typo ("platfrom"); plain null check instead of
		// ObjectUtils.isEmpty, which reduces to a null test for this type.
		PlatformDealExecutor platformDealExecutor = platformThreadMap.get(platform);
		if (platformDealExecutor != null) {
			logger.debug("{}:信号", platform);
			platformDealExecutor.addReadyDispatcherDealSignal(platform);
		}
	}

/*  Purpose: enqueue a dependency-triggered job onto its platform's dep-stream queue.
    Logic:
    - lazily create and start the platform's PlatformDealExecutor on first use;
    - hand the job to that executor's dep-stream queue. */
	public void addDepStreamJobDealQueue(JobExecutionContext jobContext) {
		PlatformDealExecutor platformDealExecutor = platformThreadMap.get(jobContext.getPlatform());
		if (platformDealExecutor == null) {
			platformDealExecutor = new PlatformDealExecutor(this);
			platformDealExecutor.setName(jobContext.getPlatform());
			platformDealExecutor.execute();
			platformThreadMap.put(jobContext.getPlatform(), platformDealExecutor);
		}
		platformDealExecutor.addDepStreamJobDealQueue(jobContext);
		// Parameterized logging avoids building the message when DEBUG is disabled.
		logger.debug("{}:进入depStream", jobContext.getUdsJob().getId());
	}

/*  Purpose: requeue jobs stuck too long in PENDING or DISPATCHER state.
    Logic:
    - query every over-time job;
    - rebuild its execution context (skipping jobs whose config is gone);
    - route it back to the pending or dispatcher queue by its status. */
	public void selectPendingOrDispatcherJobOfOverTime() {
		try {
			List<UdsJob> list = jobMapper.selectOverTimePendingOrDispathcer();
			for (UdsJob job : list) {
				UdsJobConfig jobConfig = jobConfigMapper.selectOne(job.getPlatform(), job.getSystems(), job.getJob())
						.orElse(null);
				if (ObjectUtils.isEmpty(jobConfig)) {
					continue; // config missing: cannot rebuild a context for this job
				}
				JobExecutionContext jobContext = createJobExecutionContext(job, jobConfig);
				if (jobContext.getExecutionStatus().equals(ExecutionStatus.PENDING)) {
					logger.debug("{}:进入overTimePending", jobContext.getUdsJob().getId());
					addPendingQueue(jobContext);
				} else if (jobContext.getExecutionStatus().equals(ExecutionStatus.DISPATCHER)) {
					logger.debug("{}:进入overTimeDispatcher", jobContext.getUdsJob().getId());
					addDispatcherDealQueue(jobContext);
				}
			}
		} catch (Exception e) {
			// Log through SLF4J (with stack trace) instead of printStackTrace().
			logger.error("requeue of over-time pending/dispatcher jobs failed", e);
		}
	}

/*  Purpose: scan time-trigger definitions in the DB and fire due scheduled jobs.
    Logic:
    - load every trigger whose fire time has arrived;
    - skip triggers whose config is missing, disabled, not type C, or not time-triggered;
    - reconcile the job's date/batch with the trigger's target date;
    - advance the trigger to its next valid fire time and persist it;
    - when the bookkeeping is consistent, move the job to PENDING. */
	public void selectScheduleJobDb() {
		try {
			List<UdsJobTimeTrigger> list = triggerMapper.selectJobToScheduleTime();
			for (UdsJobTimeTrigger jobTimeTrigger : list) {
				UdsJobConfig jobConfig = jobConfigMapper
						.selectOne(jobTimeTrigger.getPlatform(), jobTimeTrigger.getSystems(), jobTimeTrigger.getJob())
						.orElse(null);
				if (ObjectUtils.isEmpty(jobConfig) || !jobConfig.getIsEnable()
						|| !jobConfig.getJobType().equals(UdsJobType.C.name()) || !jobConfig.getCheckTimeTrigger()) {
					continue;
				}
				UdsJob job = jobMapper
						.selectOne(jobTimeTrigger.getPlatform(), jobTimeTrigger.getSystems(), jobTimeTrigger.getJob())
						.orElse(null);
				if (ObjectUtils.isEmpty(job)) {
					// BUGFIX: this branch previously dereferenced the null `job` when
					// building the alarm; use the trigger's identifiers instead.
					AlarmSendUtils.sendAlarm(AlarmCode.DB_ERROR, jobTimeTrigger.getPlatform(),
							jobTimeTrigger.getSystems(), jobTimeTrigger.getJob(),
							Message.SYSTEM_INFORMATION.getMsg(), Message.JOB_NOT_EXIST.getMsg(), AlarmLevel.H.name(),
							Constants.SYSTEM_STR, jobTimeTrigger.getJob());
					continue;
				}
				JobExecutionContext jobContext = createJobExecutionContext(job, jobConfig);
				jobContext.setTimeCornList(Arrays.asList(jobTimeTrigger.getCrontab()));
				// Target run date = trigger fire time shifted by the configured offset.
				LocalDate targetDate = jobTimeTrigger.getStartTime().plusDays(jobConfig.getOffsetDay()).toLocalDate();
				Boolean pass = true;
				if (jobContext.getExecutionStatus().equals(ExecutionStatus.READY)) {
					// Fast-forward the trigger until its target date passes the job date.
					while (targetDate.compareTo(job.getJobDate()) <= 0) {
						LocalDateTime nextStart = DateUtils.getNextValidTime(jobTimeTrigger.getCrontab(),
								jobTimeTrigger.getStartTime());
						jobTimeTrigger.setStartTime(nextStart);
						targetDate = jobTimeTrigger.getStartTime().plusDays(jobConfig.getOffsetDay()).toLocalDate();
					}
					if (LocalDateTime.now().compareTo(jobTimeTrigger.getStartTime()) < 0) {
						// Not due yet: just persist the advanced fire time.
						triggerMapper.updateByPrimaryKeySelective(jobTimeTrigger);
						continue;
					}
					if (job.getMultiBatch() > 0) {
						job.setMultiBatch(1);
					}
					job.setJobDate(targetDate);
				} else if (jobContext.getExecutionStatus().equals(ExecutionStatus.SUCCESS)) {
					if (job.getMultiBatch() > 0) {
						if (targetDate.compareTo(job.getJobDate()) == 0) {
							job.setMultiBatch(job.getMultiBatch() + 1); // same date: next batch
						} else if (targetDate.compareTo(job.getJobDate()) > 0) {
							job.setMultiBatch(1); // new date: restart at batch 1
							job.setJobDate(targetDate);
						} else {
							logger.warn("执行日期不对 作业:{} 当前日期:{} 执行日期:{} ", job.getJob(), job.getJobDate(), targetDate);
							pass = false;
						}
					} else if (job.getMultiBatch() == 0) {
						if (targetDate.compareTo(job.getJobDate()) > 0) {
							job.setJobDate(targetDate);
						} else {
							logger.warn("执行批次不对 作业:{} 当前日期:{} 执行日期:{} ", job.getJob(), job.getJobDate(), targetDate);
							pass = false;
						}
					}
				} else {
					continue; // only READY or SUCCESS jobs are eligible
				}
				try {
					// Persist the trigger's next valid fire time.
					LocalDateTime nextStart = DateUtils.getNextValidTime(jobTimeTrigger.getCrontab(),
							jobTimeTrigger.getStartTime());
					jobTimeTrigger.setStartTime(nextStart);
					triggerMapper.updateByPrimaryKeySelective(jobTimeTrigger);
				} catch (ParseException e) {
					// Log (with stack trace) instead of printStackTrace().
					logger.error("failed to advance trigger to its next fire time, trigger:{}", jobTimeTrigger, e);
				}
				if (pass) {
					conversionPending(jobContext); // bookkeeping consistent: fire the job
				}
			}
		} catch (Exception e) {
			logger.error("scheduled-job scan failed", e);
		}
	}

//    Purpose: update the working info tracked for a worker host (delegates to mangerWorker).
	public void updateWorkingInfo(WorkingInfo workingInfo) {
		mangerWorker.updateWork(workingInfo);
	}
//    Purpose: remove and return the working info tracked for the given host (delegates to mangerWorker).
	public WorkingInfo deleteWorkingInfo(Host host) {
		return mangerWorker.deleteWork(host);
	}
//    Purpose: persist a snapshot of the job execution as a new record row.
	public void insertJobRecord(JobExecutionContext jobContext) {
		UdsJob job = jobContext.getUdsJob();
		UdsJobConfig config = jobContext.getUdsJobConfig();
		UdsJobRecord record = new UdsJobRecord();
		// Record identity.
		record.setId(jobContext.getTaskInstanceId());
		record.setComplementId(jobContext.getComplementId());
		record.setPlatform(jobContext.getPlatform());
		record.setSystems(jobContext.getSystem());
		record.setJob(jobContext.getJob());
		// Execution state copied from the live job and its config.
		record.setDispatcherTime(job.getDispatcherTime());
		record.setJobDate(job.getJobDate());
		record.setJobType(config.getJobType());
		record.setLastStatus(job.getLastStatus());
		record.setMultiBatch(job.getMultiBatch());
		record.setNumTimes(job.getNumTimes());
		record.setPendingTime(job.getPendingTime());
		record.setStartTime(job.getStartTime());
		record.setEndTime(job.getEndTime());
		record.setServerName(job.getServerName());
		record.setVirtualEnable(config.getVirtualEnable());
		record.setStreamType(job.getStreamType());
		recordMapper.insertSelective(record);
	}

//    Purpose: refresh an existing job record row with the current execution state.
	public void updateJobRecord(JobExecutionContext jobContext) {
		UdsJob udsJob = jobContext.getUdsJob();
		UdsJobConfig cfg = jobContext.getUdsJobConfig();
		UdsJobRecord rec = new UdsJobRecord();
		// Record identity (primary key is the task instance id).
		rec.setId(jobContext.getTaskInstanceId());
		rec.setComplementId(jobContext.getComplementId());
		rec.setPlatform(jobContext.getPlatform());
		rec.setSystems(jobContext.getSystem());
		rec.setJob(jobContext.getJob());
		// Current execution state.
		rec.setDispatcherTime(udsJob.getDispatcherTime());
		rec.setJobDate(udsJob.getJobDate());
		rec.setJobType(cfg.getJobType());
		rec.setLastStatus(udsJob.getLastStatus());
		rec.setMultiBatch(udsJob.getMultiBatch());
		rec.setNumTimes(udsJob.getNumTimes());
		rec.setPendingTime(udsJob.getPendingTime());
		rec.setStartTime(udsJob.getStartTime());
		rec.setEndTime(udsJob.getEndTime());
		rec.setServerName(udsJob.getServerName());
		rec.setVirtualEnable(cfg.getVirtualEnable());
		rec.setStreamType(udsJob.getStreamType());
		recordMapper.updateByPrimaryKeySelective(rec);
	}
//    Purpose: insert a job step record. (Original comment said "update" — this inserts.)
	public void insertUdsJobStepRecord(UdsJobStepRecord record) {
		stepRecordMapper.insertSelective(record);
	}
//    Purpose: update an existing job step record by its primary key.
	public void updateJobStepRecord(UdsJobStepRecord record) {
		stepRecordMapper.updateByPrimaryKeySelective(record);
	}

//    Purpose: generate a unique task id via a lazily-created snowflake generator.
//    FIX: the lazy check-then-act on buildID was not thread-safe — two callers
//    could each create a generator and hand out colliding ids. `synchronized`
//    makes both the init and getNextId() call safe.
	public synchronized long getSingleId() {
		if (buildID == null) {
			buildID = new SnowFlakeBuidID();
		}
		return buildID.getNextId();
	}
//    Purpose: persist an externally received self-trigger signal for the given job.
	public void insertSelfSignal(JobSignal jobSignal, String platform, String systems, String job) {
		DateTimeFormatter dateFormat = DateTimeFormatter.ofPattern(DateUtils.PATTERN_YYYYMMDD_CONS);
		UdsJobSelfSignal signal = new UdsJobSelfSignal();
		signal.setPlatform(platform);
		signal.setSystems(systems);
		signal.setJob(job);
		signal.setBatch(jobSignal.getBatch());
		// The signal carries its job date as a yyyyMMdd-style string.
		signal.setJobDate(LocalDate.parse(jobSignal.getJobDate(), dateFormat));
		// Envs/params are stored as JSON text.
		signal.setEvns(JSON.toJSONString(jobSignal.getEnvs()));
		signal.setParams(JSON.toJSONString(jobSignal.getParams()));
		signal.setUseful(true); // new signals start out consumable
		selfSignalMapper.insertSelective(signal);
	}
//    Purpose: build a full execution context (config + job + steps) from the
//    platform / system / job identifiers.
	public JobExecutionContext createJobExecutionContext(String platform, String system, String job) {
		UdsJobConfig config = jobConfigMapper.selectOne(platform, system, job).orElse(null);
		UdsJob udsJob = jobMapper.selectOne(platform, system, job).orElse(null);
		List<JobStepBean> steps = getJobStepList(platform, system, job);
		JobExecutionContext context = createJobExecutionContext(udsJob, config);
		context.setStepList(steps);
		return context;
	}
//    Purpose: insert a pre-built job record row.
	public void insertUdsJobRecord(UdsJobRecord record) {
		recordMapper.insertSelective(record);

	}
//    Purpose: update a pre-built job record row by its primary key.
	public void updateJobRecord(UdsJobRecord record) {
		recordMapper.updateByPrimaryKeySelective(record);
	}

}
