package com.framework.service.timedTask.impl;

import com.alibaba.fastjson.JSONObject;
import com.framework.common.enums.job.JobDataKeyEnum;
import com.framework.common.util.other.NumeralUtil;
import com.framework.common.util.system.ApplicationContextUtil;
import com.framework.mapper.module.job.ScheduledTasksLogMapper;
import com.framework.model.module.job.vo.ScheduledTasksLogVo;
import com.framework.service.timedTask.JobHandlerService;
import org.apache.commons.lang3.StringUtils;
import org.quartz.DisallowConcurrentExecution;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.PersistJobDataAfterExecution;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.scheduling.quartz.QuartzJobBean;
import org.springframework.util.Assert;
import java.util.Date;

/**
 * @author 龘鵺
 * @version 1.0
 * @className com.framework.service.timedTask.impl.JobHandlerInvoker
 * @description 定时任务调度类
 * DisallowConcurrentExecution
 * 这个注解的作用是防止同一个 JobDetail 的多个实例同时执行。如果一个任务还在执行中，而调度器尝试再次触发该任务，则新的触发会被阻塞，直到当前任务执行完成。也就是说，同一个任务在任意时间点上只能有一个实例在执行。
 * PersistJobDataAfterExecution
 * 这个注解的作用是确保在任务执行完成后，JobDataMap 中的数据被持久化。如果任务执行过程中对 JobDataMap 进行了修改，这些修改会在任务执行结束后被保存，以便下一次任务执行时可以使用更新后的数据。
 * @datetime 2025-04-24 18:07:36
 */
@DisallowConcurrentExecution
@PersistJobDataAfterExecution
public class JobHandlerInvoker extends QuartzJobBean {

    /** Max characters of result/error text persisted to the log table column. */
    private static final int MAX_RESULT_LENGTH = 2000;

    /** One logger per class, not per job instance — hence static final. */
    private static final Logger log = LoggerFactory.getLogger(JobHandlerInvoker.class);

    @Autowired
    private ScheduledTasksLogMapper scheduledTasksLogMapper;

    /**
     * Entry point invoked by Quartz on every trigger fire.
     * Reads the job metadata from the merged JobDataMap, dispatches to the
     * configured {@link JobHandlerService} bean, persists an execution-log row,
     * and finally drives the retry logic when the handler threw.
     *
     * @param jobExecutionContext Quartz execution context carrying the JobDataMap
     * @throws JobExecutionException to signal Quartz to refire (retry) or record failure
     */
    @Override
    protected void executeInternal(JobExecutionContext jobExecutionContext) throws JobExecutionException {
        // Step 1: read job metadata — fetch the merged data map once instead of seven times
        JobDataMap dataMap = jobExecutionContext.getMergedJobDataMap();
        Long jobId = dataMap.getLong(JobDataKeyEnum.JOB_ID.name());
        String jobHandlerName = dataMap.getString(JobDataKeyEnum.JOB_HANDLER_NAME.name());
        String jobHandlerCode = dataMap.getString(JobDataKeyEnum.JOB_HANDLER_CODE.name());
        String jobHandlerParam = dataMap.getString(JobDataKeyEnum.JOB_HANDLER_PARAM.name());
        int refireCount = jobExecutionContext.getRefireCount();
        int retryCount = (Integer) dataMap.getOrDefault(JobDataKeyEnum.JOB_RETRY_COUNT.name(), 0);
        int retryInterval = (Integer) dataMap.getOrDefault(JobDataKeyEnum.JOB_RETRY_INTERVAL.name(), 0);

        // Step 2: execute the handler, capturing either a result or a failure
        Date startTime = new Date();
        JSONObject data = null;
        Throwable exception = null;
        ScheduledTasksLogVo scheduledTasksLogVo = new ScheduledTasksLogVo();
        scheduledTasksLogVo.setScheduledTasksId(jobId);
        scheduledTasksLogVo.setExecuteStartTime(startTime);
        scheduledTasksLogVo.setName(jobHandlerName);
        scheduledTasksLogVo.setCode(jobHandlerCode);
        scheduledTasksLogVo.setParam(jobHandlerParam);
        // getRefireCount() is 0-based; persist a 1-based attempt index
        scheduledTasksLogVo.setExecuteIndex(refireCount + 1);
        try {
            data = this.executeInternal(jobHandlerCode, jobHandlerParam);
            scheduledTasksLogVo.setExecuteStatus(NumeralUtil.POSITIVE_NINE_THOUSAND_NINE_HUNDRED_AND_NINETY_NINE);
        } catch (Throwable e) {
            // Pass the throwable itself so the full stack trace is logged, not just the message
            log.error("JobHandlerInvoker.executeInternal.error", e);
            exception = e;
            // e.getMessage() can be null (e.g. NullPointerException) — truncate() is null-safe
            scheduledTasksLogVo.setResult(truncate(e.getMessage()));
            scheduledTasksLogVo.setExecuteStatus(NumeralUtil.POSITIVE_SEVEN_THOUSAND_SEVEN_HUNDRED_AND_SEVENTY_SEVEN);
        }
        if (data != null) {
            // Serialize once instead of three times
            scheduledTasksLogVo.setResult(truncate(data.toJSONString()));
        }
        Date endTime = new Date();
        // Primitive long avoids pointless boxing of the duration
        long duration = endTime.getTime() - startTime.getTime();
        scheduledTasksLogVo.setCreateId(NumeralUtil.MULTIPLEXING_LONG_POSITIVE_ONE);
        scheduledTasksLogVo.setCreateTime(startTime);
        scheduledTasksLogVo.setOperaterStatus(NumeralUtil.POSITIVE_ONE);
        scheduledTasksLogVo.setExecuteEndTime(endTime);
        scheduledTasksLogVo.setDuration((int) duration);

        // Step 3: persist the execution log; a logging failure must not break retry handling
        try {
            scheduledTasksLogMapper.insertSelective(scheduledTasksLogVo);
        } catch (Throwable e) {
            log.error("JobHandlerInvoker.executeInternal.scheduledTasksLogMapper.save.error", e);
        }
        // Step 4: drive the retry logic when the handler failed
        handleException(exception, refireCount, retryCount, retryInterval);
    }

    /**
     * Truncates result/error text so it fits the log table column.
     *
     * @param text raw text, may be null
     * @return at most {@link #MAX_RESULT_LENGTH} characters, or null when text is null
     */
    private String truncate(String text) {
        if (text == null) {
            return null;
        }
        return text.length() > MAX_RESULT_LENGTH ? text.substring(0, MAX_RESULT_LENGTH) : text;
    }

    /**
     * Resolves the {@link JobHandlerService} bean by its code and executes it.
     *
     * @param jobHandlerCode Spring bean name of the handler
     * @param jobHandlerParam optional JSON parameter string, may be empty
     * @return the handler's result, may be null
     * @throws Exception any failure raised by handler resolution or execution
     */
    private JSONObject executeInternal(String jobHandlerCode, String jobHandlerParam) throws Exception {
        JobHandlerService jobHandlerService = ApplicationContextUtil.getBean(jobHandlerCode, JobHandlerService.class);
        Assert.notNull(jobHandlerService, "ApplicationContextUtil.getBean:JobHandlerService为空");
        JSONObject data = null;
        if (StringUtils.isNotEmpty(jobHandlerParam)) {
            data = JSONObject.parseObject(jobHandlerParam);
        }
        return jobHandlerService.execute(data);
    }

    /**
     * Retry strategy: rethrows when the retry limit is reached, otherwise sleeps
     * retryInterval milliseconds and asks Quartz to refire the job immediately.
     *
     * @param exception failure from the handler; null means success (no-op)
     * @param refireCount number of refires already performed (0-based)
     * @param retryCount maximum number of retries configured for the job
     * @param retryInterval sleep in milliseconds between retries
     * @throws JobExecutionException with refireImmediately=true to trigger a retry,
     *         or carrying the original cause when the retry limit is reached
     */
    private void handleException(Throwable exception,
                                 int refireCount, int retryCount, int retryInterval) throws JobExecutionException {
        if (exception == null) {
            return;
        }
        // Case 1: retry limit reached — propagate, preserving the original cause
        if (refireCount >= retryCount) {
            throw new JobExecutionException("重试已达上限！", exception);
        }

        // Case 2: below the limit — sleep the configured interval, then refire.
        // A plain sleep keeps this simple; failed jobs are expected to be rare.
        if (retryInterval > 0) {
            try {
                Thread.sleep(retryInterval);
            } catch (InterruptedException e) {
                // Restore the interrupt flag so upstream code can observe the interruption
                Thread.currentThread().interrupt();
                throw new JobExecutionException(e);
            }
        }
        // refireImmediately = true tells Quartz to run this job again right away
        throw new JobExecutionException(exception, true);
    }

}
