package com.br.aiops.receiver.es;

import com.alibaba.fastjson.JSON;
import com.br.aiops.receiver.es.build.Message;
import com.br.aiops.receiver.es.build.MessageBuilder;
import com.br.aiops.receiver.es.config.Config;
import com.br.aiops.receiver.es.config.ConfigCache;
import com.br.aiops.receiver.es.query.DSL;
import com.br.aiops.receiver.es.query.Query;
import com.br.aiops.receiver.es.query.Result;
import com.br.aiops.receiver.es.query.ResultFilter;
import com.google.common.collect.Lists;
import org.apache.commons.collections4.CollectionUtils;
import org.apache.commons.lang3.StringUtils;
import org.apache.commons.lang3.time.DateUtils;
import org.jetbrains.annotations.NotNull;
import org.jetbrains.annotations.Nullable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.scheduling.annotation.SchedulingConfigurer;
import org.springframework.scheduling.config.CronTask;
import org.springframework.scheduling.config.ScheduledTask;
import org.springframework.scheduling.config.ScheduledTaskRegistrar;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReentrantLock;
import java.util.stream.Collectors;

@Component
public class UpdateTaskConfigSchedule implements SchedulingConfigurer {

    private static final Logger LOGGER = LoggerFactory.getLogger(UpdateTaskConfigSchedule.class);

    /** Compute-type marker attached to grouped results (grouping only supports "count"). */
    public static final String COMPUTER_TYPE = "count";

    @Autowired
    private ConfigCache configCache;

    @Autowired
    private Query query;

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    /** Partition count of the target topic; used to map a message key to a partition. */
    @Value("${spring.kafka.consumer.partition-count}")
    private Integer partitionCount;

    @Value("${spring.kafka.consumer.topic}")
    private String topic;

    private ScheduledTaskRegistrar scheduledTaskRegistrar;

    /** Currently scheduled cron tasks, keyed by config id. */
    private final Map<Long, ScheduledTask> scheduledTasks = new ConcurrentHashMap<>();

    // Snapshot of the configs seen on the previous refresh; used to detect modifications.
    private Map<Long, Config> lastConfigs = new ConcurrentHashMap<>();

    private final Lock lock = new ReentrantLock();

    /**
     * Periodically pulls the task configs and reconciles the scheduled cron tasks:
     * cancels tasks whose config was deleted, re-creates tasks whose config changed
     * (detected via updateTime) and schedules tasks for newly added configs.
     */
    @Scheduled(fixedDelay = 60 * 1000)
    public void run() {
        LOGGER.info("更新从es获取任务的配置。");
        List<Config> configs = configCache.getConfigs();

        // Acquire the lock BEFORE the try block: if lock() itself failed, the
        // finally block must not call unlock() on a lock we never acquired.
        lock.lock();
        try {
            // Cancel tasks whose config has been deleted and drop them from the map.
            // (ConcurrentHashMap iterators are weakly consistent, so removing while
            // streaming the entry set is safe.)
            Set<Long> configIds = configs.stream().map(Config::getId).collect(Collectors.toSet());
            scheduledTasks.entrySet().stream()
                    .filter(entry -> !configIds.contains(entry.getKey()))
                    .forEach(entry -> {
                        LOGGER.info("停止并删除task: {}。", entry.getKey());
                        entry.getValue().cancel();
                        scheduledTasks.remove(entry.getKey());
                    });

            // Cancel tasks whose config changed since the last refresh; they get
            // re-scheduled with the new cron expression by the step below.
            configs.stream().filter(config -> {
                Config previous = lastConfigs.get(config.getId());
                return previous != null
                        && !Objects.equals(previous.getUpdateTime(), config.getUpdateTime());
            }).forEach(config -> {
                LOGGER.info("task已经修改，停止并删除task: {}。", config.getId());
                // Remove first, then null-guard the cancel: the task may be absent
                // from the map if scheduling previously failed.
                ScheduledTask existing = scheduledTasks.remove(config.getId());
                if (existing != null) {
                    existing.cancel();
                }
            });

            // Schedule configs that currently have no task (new or just-modified ones).
            configs.stream()
                    .filter(config -> !scheduledTasks.containsKey(config.getId()))
                    .forEach(config -> {
                        LOGGER.info("添加新增的task: {}。", config);
                        ScheduledTask scheduledTask = scheduledTaskRegistrar
                                .scheduleCronTask(new CronTask(getRunnable(config.getId()), config.getCron()));
                        scheduledTasks.putIfAbsent(config.getId(), scheduledTask);
                    });

            // Remember this snapshot for the next round of change detection.
            lastConfigs = configs.stream().collect(Collectors.toMap(Config::getId, config -> config));
        } finally {
            lock.unlock();
        }
    }

    /**
     * Builds the runnable executed by one cron task.
     *
     * @param configId id of the task config; the config is re-resolved from the cache
     *                 on every run so the task never holds on to a stale instance
     * @return task body that queries ES over the recent time window and publishes
     *         the resulting messages to Kafka
     */
    @NotNull
    private Runnable getRunnable(Long configId) {

        return () -> {
            // The config may have been deleted between scheduling and execution.
            if (!configCache.containsKey(configId)) {
                return;
            }
            Config config = configCache.get(configId);
            // Query window: [now - lastMinutes, now), both ends truncated to whole minutes.
            long start = DateUtils.truncate(DateUtils.addMinutes(new Date(), -config.getLastMinutes()), Calendar.MINUTE).getTime();
            long end = DateUtils.truncate(new Date(), Calendar.MINUTE).getTime();

            DSL dsl = DSL.builder()
                    .method(config.getMethod())
                    .endpoint(config.getEndpoint())
                    .body(config.getBody())
                    .resultFields(config.getResultFields())
                    .build();
            try {
                Result result = query.query(dsl, start, end);
                LOGGER.info("查询结果为：{}", result);
                List<Message> kafkaMessages = buildMessages(config, result);
                LOGGER.info("生成的kafka消息为：{}", kafkaMessages);

                // Route every message of one (service, errorCode) pair to the same partition.
                String key = config.getService() + "_" + config.getErrorCode();
                // floorMod instead of Math.abs(...) % n: Math.abs(Integer.MIN_VALUE)
                // is still negative and would produce an invalid partition number.
                int partition = Math.floorMod(key.hashCode(), partitionCount);
                if (CollectionUtils.isNotEmpty(kafkaMessages)) {
                    for (Message message : kafkaMessages) {
                        LOGGER.info("往kafka发送消息：{}", message);
                        kafkaTemplate.send(topic, partition, key, JSON.toJSONString(message));
                    }
                }
            } catch (IOException e) {
                LOGGER.error("获取es数据错误或写出kafka失败。", e);
            }
        };
    }

    /**
     * Converts a query result into Kafka messages according to the config's result type.
     * Never actually returns null — unknown result types yield an empty list.
     *
     * @param config the task config describing how the result should be interpreted
     * @param result the raw ES query result
     * @return the messages to publish; empty for an unrecognized result type
     */
    @Nullable
    private List<Message> buildMessages(Config config, Result result) {
        MessageBuilder messageBuilder = MessageBuilder.builder()
                .service(config.getService())
                .errorCode(config.getErrorCode())
                .msg(config.getMessage())
                .hosts(result.getHosts())
                .build();

        switch (config.getResultType()) {
            case Config.RESULT_TYPE_TOTAL:
                return messageBuilder.buildMessage(result.getTotalCount(), result.getMessage());
            case Config.RESULT_TYPE_LIST:
                return messageBuilder.buildMessage(result.getRecords(), result.getMessage());
            case Config.RESULT_TYPE_GROUP:
                // Grouping only supports the "count" compute type.
                ResultFilter filter = result.compute(config.getGroupField());
                // Apply the optional filter condition configured on the task.
                if (StringUtils.isNotBlank(config.getFilterOperator())) {
                    filter = filter.filter(new ResultFilter.Condition(config.getFilterOperator(), config.getFilterValue()));
                }
                return messageBuilder.buildMessage(filter.getResult(), COMPUTER_TYPE, result.getMessage());
            default:
                return Lists.newArrayList();
        }
    }

    /** @return the live map of scheduled tasks keyed by config id (exposed for inspection/tests). */
    public Map<Long, ScheduledTask> getScheduledTasks() {
        return scheduledTasks;
    }

    @Override
    public void configureTasks(ScheduledTaskRegistrar taskRegistrar) {
        taskRegistrar.setScheduler(taskExecutor());
        // Keep the registrar so run() can schedule cron tasks dynamically at runtime.
        this.scheduledTaskRegistrar = taskRegistrar;
    }

    /** Scheduler backing the dynamically registered cron tasks; shut down on context close. */
    @Bean(destroyMethod = "shutdown")
    public Executor taskExecutor() {
        return Executors.newScheduledThreadPool(32);
    }
}
