package cn.spdb.harrier.server.master.complement;

import java.util.Arrays;
import java.util.List;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.stream.Collectors;

import javax.annotation.PostConstruct;

import org.apache.commons.lang3.ObjectUtils;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;

import cn.spdb.harrier.common.enmus.ExecutionStatus;
import cn.spdb.harrier.common.enmus.StreamType;
import cn.spdb.harrier.common.enmus.UdsJobType;
import cn.spdb.harrier.common.utils.CollectionUtils;
import cn.spdb.harrier.common.utils.Host;
import cn.spdb.harrier.common.utils.Stopper;
import cn.spdb.harrier.common.utils.Symbol;
import cn.spdb.harrier.common.utils.URI;
import cn.spdb.harrier.dao.entity.UdsComplement;
import cn.spdb.harrier.dao.entity.UdsJobComplement;
import cn.spdb.harrier.dao.entity.UdsJobDateFrequency;
import cn.spdb.harrier.dao.entity.UdsJobTimeTrigger;
import cn.spdb.harrier.dao.mapper.UdsComplementMapper;
import cn.spdb.harrier.dao.mapper.UdsJobComplementMapper;
import cn.spdb.harrier.dao.mapper.UdsJobDateFrequencyMapper;
import cn.spdb.harrier.dao.mapper.UdsJobDependencyMapper;
import cn.spdb.harrier.dao.mapper.UdsJobTimeTriggerMapper;
import cn.spdb.harrier.dao.utils.BeanContext;
import cn.spdb.harrier.rpc.client.RpcClient;
import cn.spdb.harrier.server.entity.ComplementIns;
import cn.spdb.harrier.server.entity.JobExecutionContext;
import cn.spdb.harrier.server.master.MasterManagerService;
import cn.spdb.harrier.server.master.cache.MasterMangerWorker;
import cn.spdb.harrier.server.worker.rpc.transport.WorkTransportServerInterface;

@Component
public class ComplementService implements Runnable {

	// Class-based logger so the fully-qualified name is available to log configuration
	// (getSimpleName() would register the logger under a package-less name).
	private final Logger logger = LoggerFactory.getLogger(ComplementService.class);

	// Queue of pending complement (data back-fill) requests, consumed by the single daemon thread.
	private final LinkedBlockingQueue<ComplementIns> complementQueue = new LinkedBlockingQueue<ComplementIns>();

	/**
	 * Enqueues a complement instance for asynchronous processing.
	 *
	 * @param e the complement instance to schedule; must not be null
	 */
	public void addComplementIns(ComplementIns e) {
		complementQueue.add(e);
	}

	/**
	 * Starts the single daemon consumer thread once dependency injection has completed.
	 */
	@PostConstruct
	public void start() {
		Thread thread = new Thread(this, "Master-Complement-Execute-Manager-Thread");
		thread.setDaemon(true);
		thread.start();
	}

	@Autowired
	private UdsComplementMapper complementMapper;
	@Autowired
	private UdsJobComplementMapper jobComplementMapper;
	@Autowired
	private UdsJobDateFrequencyMapper frequencyMapper;
	@Autowired
	private UdsJobTimeTriggerMapper triggerMapper;

	/**
	 * Main loop of the complement scheduling service.
	 *
	 * Repeatedly takes one complement instance off the queue and drives it end to
	 * end: id assignment, worker selection, job-context construction, cron loading,
	 * DAG wiring, persistence and RPC dispatch. Any failure is logged and the loop
	 * continues with the next instance.
	 */
	@Override
	public void run() {
		while (Stopper.isRunning()) {
			try {
				processNext();
			} catch (InterruptedException ie) {
				// Restore the interrupt flag so shutdown logic observing it can react;
				// the original broad catch silently swallowed it.
				Thread.currentThread().interrupt();
				logger.error("补数任务处理异常", ie);
			} catch (Exception e) {
				logger.error("补数任务处理异常", e);
			}
		}
	}

	/**
	 * Processes a single complement instance taken from the queue.
	 *
	 * @throws InterruptedException if the thread is interrupted while waiting on the queue
	 */
	private void processNext() throws InterruptedException {
		ComplementIns complementIns = complementQueue.take();
		UdsComplement uc = complementIns.getUdsComplement();

		// Assign a cluster-unique id to this complement run.
		uc.setId(BeanContext.getBean(MasterManagerService.class).getSingleId());

		// Default the concurrency cap to 1 when unset or non-positive.
		if (ObjectUtils.isEmpty(uc.getMaxRunJob()) || uc.getMaxRunJob() <= 0) {
			uc.setMaxRunJob(1);
		}

		// Resolve the target worker from the first entry of the configured server-name
		// range. String.split always yields at least one element, so [0] is safe.
		Host host = BeanContext.getBean(MasterMangerWorker.class)
				.getWorkerByServerName(uc.getServerNameRange().split(Symbol.FEN_HAO_REG)[0])
				.getHost();

		List<JobExecutionContext> jobConList = buildJobContexts(complementIns);

		wireDag(complementIns, jobConList, host);

		// No runnable job survived filtering: record the run as FAILURE and stop here.
		if (jobConList.isEmpty()) {
			uc.setLastStatus(ExecutionStatus.FAILURE.name());
			complementMapper.insert(uc);
			return;
		}
		uc.setLastStatus(ExecutionStatus.READY.name());
		complementMapper.insert(uc);

		insertJobRows(complementIns, uc);

		dispatch(complementIns, host);
	}

	/**
	 * Builds an execution context for every well-formed job descriptor, attaches
	 * the complement id and applicable cron expressions, and drops jobs that end
	 * up without a usable schedule.
	 *
	 * @param complementIns the complement whose raw job list is converted
	 * @return the surviving, fully-initialized job execution contexts
	 */
	private List<JobExecutionContext> buildJobContexts(ComplementIns complementIns) {
		return complementIns.getJobList().stream()
				// Drop malformed descriptors; a job needs at least platform/system/job parts.
				.filter(parts -> parts.length >= 3)
				.map(parts -> BeanContext.getBean(MasterManagerService.class)
						.createJobExecutionContext(parts[0], parts[1], parts[2]))
				// Drop contexts whose job or job configuration could not be resolved.
				.filter(ctx -> ObjectUtils.isNotEmpty(ctx.getUdsJob())
						&& ObjectUtils.isNotEmpty(ctx.getUdsJobConfig()))
				.map(ctx -> {
					ctx.setComplementId(complementIns.getComplementId());
					loadCron(ctx);
					return ctx;
				})
				// Keep type D unconditionally; every other type must carry at least one cron.
				.filter(ctx -> ctx.getJobType().equals(UdsJobType.D)
						|| (ctx.getJobType().equals(UdsJobType.C)
								&& !CollectionUtils.isEmpty(ctx.getTimeCornList()))
						|| !CollectionUtils.isEmpty(ctx.getCornList()))
				.collect(Collectors.toList());
	}

	/**
	 * Populates the cron lists on the context according to the job type:
	 * type D needs no cron, type C reads time-trigger rows, all other types
	 * read date-frequency rows.
	 *
	 * @param ctx the context to enrich with cron expressions
	 */
	private void loadCron(JobExecutionContext ctx) {
		if (ctx.getJobType().equals(UdsJobType.D)) {
			// Type D is dispatched directly; no cron expression required.
		} else if (ctx.getJobType().equals(UdsJobType.C)) {
			List<UdsJobTimeTrigger> triggers = triggerMapper.select(
					ctx.getPlatform(), ctx.getSystem(), ctx.getJob());
			ctx.setTimeCornList(triggers.stream()
					.map(UdsJobTimeTrigger::getCrontab).collect(Collectors.toList()));
		} else {
			List<UdsJobDateFrequency> frequencies = frequencyMapper.select(
					ctx.getPlatform(), ctx.getSystem(), ctx.getJob());
			ctx.setCornList(frequencies.stream()
					.map(UdsJobDateFrequency::getCrontab).collect(Collectors.toList()));
		}
	}

	/**
	 * Forces every job to start, binds it to the chosen worker host, adds it to
	 * the complement's DAG and — when more than one job takes part — wires
	 * dependency edges between jobs present in the DAG.
	 *
	 * @param complementIns the complement owning the DAG
	 * @param jobConList    the contexts to register as DAG nodes
	 * @param host          the worker host every job is bound to
	 */
	private void wireDag(ComplementIns complementIns, List<JobExecutionContext> jobConList, Host host) {
		// Loop-invariant: dependency edges only matter when several jobs run together.
		boolean resolveDeps = jobConList.size() > 1;
		jobConList.forEach(ctx -> {
			ctx.getUdsJob().setStreamType(StreamType.FORCE_START.getId());
			ctx.setHost(host);
			complementIns.addNodeInfoJob(ctx);
			if (resolveDeps) {
				// NOTE(review): edges are filtered against the DAG built so far in this
				// single pass, so a dependency on a job added later in the loop is
				// skipped — confirm this ordering is intended.
				BeanContext.getBean(UdsJobDependencyMapper.class)
						.selectJobDeps(ctx.getPlatform(), ctx.getSystem(), ctx.getJob()).stream()
						.filter(dep -> complementIns.getDag()
								.containsNode(dep.getDepPlatform() + Symbol.XIA_HUA_XIAN
										+ dep.getDepSystem() + Symbol.XIA_HUA_XIAN
										+ dep.getDepJob()))
						.forEach(complementIns::addEdgeJob);
			}
		});
	}

	/**
	 * Persists one READY {@link UdsJobComplement} row per node that made it into
	 * the DAG, keyed by the newly assigned complement id.
	 *
	 * @param complementIns the complement whose DAG nodes are persisted
	 * @param uc            the parent complement carrying the generated id
	 */
	private void insertJobRows(ComplementIns complementIns, UdsComplement uc) {
		complementIns.getDag().getNodeInfoValues().forEach(node -> {
			UdsJobComplement row = new UdsJobComplement();
			row.setComplementId(uc.getId());
			row.setPlatform(node.getPlatform());
			row.setSystems(node.getSystem());
			row.setJob(node.getJob());
			row.setLastStatus(ExecutionStatus.READY.name());
			jobComplementMapper.insert(row);
		});
	}

	/**
	 * Sends the complement to the chosen worker via RPC; on a false return the
	 * job-complement row is marked FAILURE.
	 *
	 * @param complementIns the complement to dispatch
	 * @param host          the worker endpoint to contact
	 */
	private void dispatch(ComplementIns complementIns, Host host) {
		WorkTransportServerInterface workTransportServerInterface = RpcClient.getInstance()
				.create(WorkTransportServerInterface.class, new URI("spdb", host.getIp(), host.getPort()));

		if (!workTransportServerInterface.dispathcerComplement(complementIns)) {
			// NOTE(review): the failure update keys on complementIns.getComplementId(),
			// while the rows inserted above carry uc.getId() as complementId — confirm
			// the FAILURE status lands on the intended row.
			UdsJobComplement row = new UdsJobComplement();
			row.setId(complementIns.getComplementId());
			row.setLastStatus(ExecutionStatus.FAILURE.name());
			jobComplementMapper.updateByPrimaryKeySelective(row);
		}
	}

	/**
	 * Updates the overall status of a complement run.
	 *
	 * @param complementId    the primary key of the complement to update
	 * @param executionStatus the status to record
	 * @return the number of rows affected
	 */
	public int updateUdsComplement(Long complementId, ExecutionStatus executionStatus) {
		UdsComplement row = new UdsComplement();
		row.setId(complementId);
		row.setLastStatus(executionStatus.name());
		return complementMapper.updateByPrimaryKeySelective(row);
	}

	/**
	 * Mirrors the runtime state of a finished/progressing job back onto its
	 * {@link UdsJobComplement} row.
	 *
	 * @param jobContext the execution context whose job state is persisted
	 * @return the number of rows affected
	 */
	public int updateUdsJobComplement(JobExecutionContext jobContext) {
		UdsJobComplement row = new UdsJobComplement();
		row.setComplementId(jobContext.getComplementId());
		row.setEndTime(jobContext.getUdsJob().getEndTime());
		row.setJob(jobContext.getJob());
		row.setJobDate(jobContext.getUdsJob().getJobDate());
		row.setLastStatus(jobContext.getUdsJob().getLastStatus());
		row.setMultiBatch(jobContext.getUdsJob().getMultiBatch());
		row.setPlatform(jobContext.getUdsJob().getPlatform());
		row.setServerName(jobContext.getUdsJob().getServerName());
		row.setStartTime(jobContext.getUdsJob().getStartTime());
		row.setSystems(jobContext.getUdsJob().getSystems());
		return jobComplementMapper.updateByPrimaryIdSelective(row);
	}

}
