package com.sui.bigdata.rtcadmin.timing;

import com.feidee.fdcommon.configuration.CustomConfiguration;
import com.feidee.fdcommon.util.CustomThreadFactory;
import com.google.common.collect.Lists;
import com.sui.bigdata.rtcadmin.async.BackpressureAlarm;
import com.sui.bigdata.rtcadmin.async.JobAlarm;
import com.sui.bigdata.rtcadmin.async.MemoryAlarm;
import com.sui.bigdata.rtcadmin.async.SparkAppAlarm;
import com.sui.bigdata.rtcadmin.constant.AppConstant;
import com.sui.bigdata.rtcadmin.constant.JobConstant;
import com.sui.bigdata.rtcadmin.repository.mapper.AppConfMapper;
import com.sui.bigdata.rtcadmin.repository.mapper.JobConfigMapper;
import com.sui.bigdata.rtcadmin.repository.mapper.JobStatusMapper;
import com.sui.bigdata.rtcadmin.repository.model.JobConfig;
import com.sui.bigdata.rtcadmin.repository.model.SparkApp;
import com.sui.bigdata.rtcadmin.util.AlarmUtils;
import com.sui.bigdata.rtcadmin.util.RedisUtils;
import com.sui.bigdata.rtcadmin.util.SparkAlarmUtil;

import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.scheduling.annotation.EnableAsync;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import java.util.Arrays;
import java.util.List;
import java.util.Set;
import java.util.UUID;
import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import static com.sui.bigdata.rtcadmin.util.AlarmUtils.*;

/**
 * @author YongChen
 * @date 2019/12/30 10:57
 * @description
 * @email yong_chen@sui.com
 */
@Component
@EnableScheduling
@EnableAsync
public class Crontab {
    /**
     * Shared fixed-size pool (core == max, unbounded queue) used to run alarm
     * tasks asynchronously. Size is read from {@code poolSize.LogCollectService}
     * (default 100).
     */
    public static final ThreadPoolExecutor threadPool = new ThreadPoolExecutor(
            CustomConfiguration.getInt("poolSize.LogCollectService", 100),
            CustomConfiguration.getInt("poolSize.LogCollectService", 100),
            0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(),
            new CustomThreadFactory("AlarmService"));

    private static final Logger logger = LoggerFactory.getLogger(Crontab.class);

    // Not a Spring bean in the original code; kept as a plain instance to
    // preserve behavior.
    private final AlarmUtils alarmUtils = new AlarmUtils();

    @Autowired
    private JobConfigMapper jobConfigMapper;
    @Autowired
    private JobStatusMapper jobStatusMapper;

    @Autowired
    AppConfMapper appConfMapper;

    @Autowired
    private JobAlarm jobAlarm;
    @Autowired
    private RedisUtils redisUtils;

    @Autowired
    private SparkAppAlarm sparkAppAlarm;

    @Autowired
    private SparkAlarmUtil sparkAlarmUtil;

    @Value("${flink.install.path}")
    private String flinkInstallPath;
    @Value("${yarn.conf.path}")
    private String yarnConfPath;
    @Value("${alarm.msg.url}")
    private String alarmMsgUrl;
    @Value("${oneday.max.alarm.time}")
    private Integer todayMaxAlarmTimes;
    @Value("${memory.alarm.threshold}")
    private Integer memoryAlarmThreshold;
    @Value("${memory.recover.threshold}")
    private Integer memoryRecoverThreshold;
    @Value("${legacy.job}")
    private String legacyJob;

    /**
     * Refreshes the status of every online Flink job once a minute.
     * Jobs in the legacy blacklist are skipped; for the rest, a Redis lock
     * ({@code <jobName>_ts}) guards against concurrent scheduler instances.
     * After the alarm pass, each job's status is written back to both the
     * job-config and job-status tables.
     *
     * @throws Exception propagated from the status update pass
     */
    @Scheduled(cron = "0 0/1 * * * ?")
    public void updateTaskStatus() throws Exception {
        logger.info(" updateTaskStatus ................");
        List<JobConfig> jobConfigs = jobConfigMapper.queryOnlineJob();
        List<JobConfig> jobInfos = Lists.newArrayList();
        // Parse the legacy-job blacklist once, not once per job (loop-invariant).
        Set<String> legacyJobNames = legacyJobNames();
        for (JobConfig jobConfig : jobConfigs) {
            if (legacyJobNames.contains(jobConfig.getJobName())) {
                continue;
            }
            String appId = jobConfig.getAppId();
            if (appId != null && !appId.isEmpty()) {
                String lockKey = jobConfig.getJobName() + "_ts";
                String lockValue = UUID.randomUUID().toString();
                try {
                    // Only collect the job when it needs an update, no other
                    // instance holds the lock, and we managed to acquire it.
                    if (doUpdate(jobConfig)
                            && redisUtils.get(lockKey) == null
                            && redisUtils.setScheduler(lockKey, lockValue)) {
                        jobInfos.add(jobConfig);
                    }
                } catch (Exception e) {
                    // Pass the exception as the last argument so the stack
                    // trace is not lost.
                    logger.error("updateTaskStatus {} exception.", jobConfig.getJobName(), e);
                }
            }
        }
        alarmUtils.getJobStatus(jobInfos, flinkInstallPath, yarnConfPath, jobAlarm);
        for (JobConfig jobConfig : jobConfigs) {
            jobConfigMapper.updateStatusByJobName(jobConfig.getJobName(), jobConfig.getStatus(), JobConstant.JOB_IS_ONLINE);
            jobStatusMapper.updateStatus(jobConfig.getJobName(), jobConfig.getAppId(), jobConfig.getStatus());
        }
    }

    /**
     * Every 10 minutes, checks each online (non-legacy) job for backpressure
     * and dispatches {@link BackpressureAlarm} tasks onto {@link #threadPool}.
     * A Redis lock ({@code <jobName>_bp}) prevents duplicate alarm runs.
     */
    @Scheduled(cron = "0 0/10 * * * ?")
    public void alarmBackpressure() {
        logger.info(" alarmBackpressure ................");
        // Parse the legacy-job blacklist once, not once per job (loop-invariant).
        Set<String> legacyJobNames = legacyJobNames();
        for (JobConfig jobConfig : jobConfigMapper.queryOnlineJob()) {
            if (legacyJobNames.contains(jobConfig.getJobName())) {
                continue;
            }
            String lockKey = jobConfig.getJobName() + "_bp";
            String lockValue = UUID.randomUUID().toString();
            try {
                if (doMonitor(jobConfig)
                        && redisUtils.get(lockKey) == null
                        && redisUtils.setScheduler(lockKey, lockValue)) {
                    List<BackpressureAlarm> backpressureAlarms = BackpressureAlarm.build(jobConfig, alarmMsgUrl, redisUtils, todayMaxAlarmTimes);
                    if (null != backpressureAlarms) {
                        for (BackpressureAlarm backpressureAlarm : backpressureAlarms) {
                            threadPool.execute(backpressureAlarm);
                        }
                    }
                }
            } catch (Exception e) {
                // Include the exception so the failure cause is logged.
                logger.error("alarmBackpressure {} exception.", jobConfig.getJobName(), e);
            }
        }
    }

    /**
     * Memory-usage alarm pass over all online jobs. Currently DISABLED: the
     * {@code @Scheduled} annotation below is intentionally commented out.
     * When enabled, dispatches {@link MemoryAlarm} tasks guarded by the
     * Redis lock {@code <jobName>_mu}.
     */
    /*@Scheduled(cron = "0 0/10 * * * ?")*/
    public void alarmMemoryUsed() {
        logger.info(" alarmMemoryUsed ................");
        for (JobConfig jobConfig : jobConfigMapper.queryOnlineJob()) {
            String lockKey = jobConfig.getJobName() + "_mu";
            String lockValue = UUID.randomUUID().toString();
            try {
                if (doMonitor(jobConfig)
                        && redisUtils.get(lockKey) == null
                        && redisUtils.setScheduler(lockKey, lockValue)) {
                    List<MemoryAlarm> memoryAlarms = MemoryAlarm.build(jobConfig, alarmMsgUrl, redisUtils, todayMaxAlarmTimes, memoryAlarmThreshold, memoryRecoverThreshold);
                    if (null != memoryAlarms) {
                        for (MemoryAlarm memoryAlarm : memoryAlarms) {
                            threadPool.execute(memoryAlarm);
                        }
                    }
                }
            } catch (Exception e) {
                // Include the exception so the failure cause is logged.
                logger.error("alarmMemoryUsed {} exception.", jobConfig.getJobName(), e);
            }
        }
    }

    /**
     * Once a minute, scans all Spark applications whose status warrants a
     * check (see {@link #doScan}), acquires the {@code <appName>_ts} Redis
     * lock per app, runs the status scan, then writes every app's status
     * back to the config and job-status tables.
     */
    @Scheduled(cron = "0 0/1 * * * ?")
    public void updateSparkAppStatus() {
        List<SparkApp> sparkApps = appConfMapper.selectAll(new SparkApp());
        List<SparkApp> newSparkApps = sparkApps.stream()
                .filter((s) -> doScan(s.getAppStatus(), s.getAppName()))
                .filter((s) -> {
                    String lockKey = s.getAppName() + "_ts";
                    String lockValue = UUID.randomUUID().toString();
                    return redisUtils.setScheduler(lockKey, lockValue);
                })
                .collect(Collectors.toList());

        sparkAlarmUtil.scanApplicationStatus(newSparkApps, yarnConfPath, sparkAppAlarm);
        // Write back statuses for ALL apps, not just the ones scanned this
        // round (matches the original behavior).
        for (SparkApp sparkApp : sparkApps) {
            appConfMapper.updateStatus(sparkApp.getAppName(), sparkApp.getAppStatus());
            jobStatusMapper.updateStatus(sparkApp.getAppName(), sparkApp.getAppId(), sparkApp.getAppStatus());
        }
    }

    /**
     * Decides whether a Spark app should be scanned: its status must be
     * RUNNING, ACCEPT, or DISCONNECT, and no {@code <appName>_ts} lock may
     * currently be held in Redis.
     *
     * @param status  current app status
     * @param appName app name used to derive the lock key
     * @return {@code true} when the app is eligible for a status scan
     */
    public boolean doScan(String status, String appName) {
        String lockKey = appName + "_ts";
        boolean available = redisUtils.get(lockKey) == null;
        boolean flag = AppConstant.APP_STATUS_RUNNING.equals(status)
                || AppConstant.APP_STATUS_ACCEPT.equals(status)
                || AppConstant.APP_STATUS_DISCONNECT.equals(status);

        return flag && available;
    }

    /** Splits the comma-separated {@code legacy.job} property into a set of job names. */
    private Set<String> legacyJobNames() {
        return Arrays.stream(legacyJob.split(",")).collect(Collectors.toSet());
    }
}
