package cn.com.zetatech.loader.common.service.impl;

import cn.com.zetatech.loader.api.FileResolver;
import cn.com.zetatech.loader.api.util.SpringContextUtil;
import cn.com.zetatech.loader.common.constant.LoaderConstant;
import cn.com.zetatech.loader.common.message.ResolverMessage;
import cn.com.zetatech.loader.common.service.ResolverResultService;
import cn.com.zetatech.loader.common.service.ResolverStateControlService;
import cn.hutool.core.collection.CollectionUtil;
import cn.hutool.core.io.FileUtil;
import com.google.gson.Gson;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.TopicPartition;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.data.redis.core.RedisTemplate;
import org.springframework.kafka.core.ConsumerFactory;
import org.springframework.stereotype.Service;

import java.util.*;
import java.util.concurrent.Executor;
import java.util.concurrent.TimeUnit;

/**
 * Redis-backed implementation of {@link ResolverStateControlService}.
 *
 * <p>Tracks the parse/store lifecycle of a single file in redis under keys derived
 * from the file id:
 * <ul>
 *   <li>{@code {fileId}:dataCount} — number of store messages still in flight</li>
 *   <li>{@code {fileId}:data} — list of "topic_partition_offset" entries for in-flight kafka messages</li>
 *   <li>{@code {fileId}:flag} — set when the file itself has finished parsing (value = rollback/commit handle)</li>
 *   <li>{@code {fileId}:error} / {@code {fileId}:errorHandled} — failure marker and its dedup flag</li>
 *   <li>{@code {fileId}:cache} — job metadata hash</li>
 *   <li>{@code {fileId}:relateFiles} — JSON map of related remote path → local path</li>
 * </ul>
 *
 * @author zhengbs
 */
@Service
@ConditionalOnProperty(value = "resolver.state.monitor", havingValue = "redis", matchIfMissing = true)
@Slf4j
public class ResolverStateControlRedisImpl implements ResolverStateControlService {

    @Autowired
    private RedisTemplate redisTemplate;

    @Autowired
    private ConsumerFactory consumerFactory;

    @Autowired
    private ResolverResultService resolverResultService;

    @Autowired
    private Gson gson;

    /** TTL (ms) applied to every per-file redis key; doubles as the monitor-loop timeout. Default 30 min. */
    @Value("${resolver.state.expire:1800000}")
    private long redisKeyExpireTime;

    /**
     * Starts job-level state monitoring.
     *
     * <p>NOTE(review): the {@code entries()} result is discarded — this looks like a
     * leftover no-op (possibly a warm-up read); confirm whether job state should
     * actually be initialized here.
     */
    @Override
    public void initJobMonitor(ResolverMessage resolverMessage, long startTimeMillis) {
        String jobId = resolverMessage.getJobId();
        log.info("<=========> 开启解析任务状态监控，jobId：{}", jobId);
        redisTemplate.opsForHash().entries(jobId);
    }

    /**
     * Blocks and monitors a single file's parse/store lifecycle until it either
     * finishes (success, or failure with rollback) or times out after
     * {@code redisKeyExpireTime} ms; a timeout is reported as a failure.
     *
     * @param resolverMessage job message; carries the actuator bean name used to
     *                        look up the {@link FileResolver}
     * @param remoteFileId    id of the file on the remote side
     * @param fileId          local file id, used as the redis key prefix
     * @param fileInfoMap     mutable file info; related-file paths are added here
     * @param startTimeMillis when processing of this file began
     * @return elapsed processing time in ms (0 if the loop ended without a result)
     */
    @Override
    public long initFileMonitor(ResolverMessage resolverMessage, String remoteFileId, String fileId, Map<String, Object> fileInfoMap, long startTimeMillis) {
        String filePath = (String) fileInfoMap.get(LoaderConstant.File_Local_Path); // local path of the file being parsed
        log.info("<=========> 开启文件解析状态监控，文件：{}，fileId：{}", filePath, fileId);
        createRedisCache(resolverMessage, filePath, fileId, startTimeMillis);
        long runDuration = 0;
        long startTime = System.currentTimeMillis();
        int times = 1;
        boolean done = false;
        // Bounded poll loop: give up after redisKeyExpireTime ms so an abnormal
        // termination elsewhere cannot leave this thread spinning forever.
        while (System.currentTimeMillis() - startTime < redisKeyExpireTime) {
            if (times % 100 == 1) {
                log.info("<=========> 文件解析状态监控中，fileId：{}", fileId);
            }
            try {
                // Failure path: an error was reported and has not been handled yet.
                // The ":errorHandled" flag makes this branch run at most once.
                if (hasKey(fileId + ":error") && !hasKey(fileId + ":errorHandled")) {
                    redisTemplate.opsForValue().set(fileId + ":errorHandled", "1", redisKeyExpireTime, TimeUnit.MILLISECONDS);
                    String errorMsg = (String) redisTemplate.opsForValue().get(fileId + ":error");
                    runDuration = System.currentTimeMillis() - startTimeMillis;
                    log.info("<=========> 文件：{}解析失败，耗时：{}", filePath, runDuration);
                    loadRelateFiles(fileId, fileInfoMap);
                    resolverResultService.fail(resolverMessage, errorMsg, remoteFileId, fileInfoMap, runDuration, true);
                    // Stop consuming the remaining kafka messages for this file by
                    // committing their offsets manually.
                    List<String> topicPartitionOffsets = redisTemplate.opsForList().range(fileId + ":data", 0, -1);
                    if (CollectionUtil.isNotEmpty(topicPartitionOffsets)) {
                        log.info("<=========> 文件：{}解析失败，手动提交kafka中消息：{}", filePath, topicPartitionOffsets);
                        FileResolver fileResolver = (FileResolver) SpringContextUtil.getBean(resolverMessage.getActuatorName());
                        stopDataStoreByKafkaOffset(fileResolver, fileId, topicPartitionOffsets);
                    }
                }

                // Completion path: the ":flag" key is set once the whole file has
                // been parsed; we then wait for all store messages to drain.
                String flagData = (String) redisTemplate.opsForValue().get(fileId + ":flag");
                if (flagData != null) {
                    // Was: (int) cast of the raw value — throws ClassCastException,
                    // since the key is seeded as Long (and may deserialize as
                    // Integer/Long/String depending on the template's serializer).
                    long dataCount = parseCount(redisTemplate.opsForValue().get(fileId + ":dataCount"));
                    if (dataCount == 0) {
                        // All store messages drained.
                        if (hasKey(fileId + ":error")) {
                            if (StringUtils.isNotBlank(flagData)) {
                                log.info("<=========> 文件：{}解析失败，执行回滚操作：{}", filePath, flagData);
                                // Parse or store failed: roll back via the resolver,
                                // flagData is the rollback handle.
                                FileResolver fileResolver = (FileResolver) SpringContextUtil.getBean(resolverMessage.getActuatorName());
                                fileResolver.failure(flagData);
                            }
                        } else {
                            // All data stored successfully.
                            runDuration = System.currentTimeMillis() - startTimeMillis;
                            log.info("<=========> 文件：{}解析成功，耗时：{}", filePath, runDuration);
                            loadRelateFiles(fileId, fileInfoMap);
                            resolverResultService.success(resolverMessage, remoteFileId, fileInfoMap, runDuration);
                            FileResolver fileResolver = (FileResolver) SpringContextUtil.getBean(resolverMessage.getActuatorName());
                            fileResolver.success(flagData);
                        }
                        done = true;
                        break;
                    }
                }
                // Not finished yet — back off briefly before polling again.
                TimeUnit.MILLISECONDS.sleep(200);
                times++;
            } catch (Exception e) {
                log.error(e.getMessage(), e);
                if (e instanceof InterruptedException) {
                    // Preserve the interrupt status for callers/executors.
                    Thread.currentThread().interrupt();
                }
                // Abort monitoring; 'done' stays false so the timeout/failure
                // handling below reports the file as failed.
                break;
            }
        }
        // Monitoring finished: drop the per-file redis state.
        removeRedisCache(fileId);
        log.info("<=========> 结束文件解析状态监控，fileId：{}", fileId);
        // Timed out (or aborted by an exception): treat as a parse failure.
        if (!done) {
            log.info("<=========> 文件：{}-{}解析超时，执行失败操作", filePath, fileId);
            loadRelateFiles(fileId, fileInfoMap);
            runDuration = System.currentTimeMillis() - startTimeMillis;
            resolverResultService.fail(resolverMessage, "处理超时", remoteFileId, fileInfoMap, runDuration, true);
        }
        // ":relateFiles" is read by the timeout path above, so it is deleted last.
        // (Previously this key was never deleted and had no TTL — it leaked.)
        redisTemplate.delete(fileId + ":relateFiles");
        return runDuration;
    }

    /**
     * Parses the ":dataCount" value regardless of how the template's serializer
     * round-trips it (Long, Integer, or numeric String). A missing key counts as 0.
     */
    private long parseCount(Object raw) {
        if (raw == null) {
            return 0L;
        }
        if (raw instanceof Number) {
            return ((Number) raw).longValue();
        }
        return Long.parseLong(raw.toString().trim());
    }

    /** Null-safe hasKey ({@code RedisTemplate.hasKey} may return a null Boolean). */
    private boolean hasKey(String key) {
        return Boolean.TRUE.equals(redisTemplate.hasKey(key));
    }

    /**
     * Copies the cached related-file map (remote path → local path JSON) into
     * {@code fileInfoMap} under {@link LoaderConstant#File_Relate_Remote_Paths},
     * if any related files were registered.
     */
    private void loadRelateFiles(String fileId, Map<String, Object> fileInfoMap) {
        if (hasKey(fileId + ":relateFiles")) {
            String relateFiles = (String) redisTemplate.opsForValue().get(fileId + ":relateFiles");
            Map<String, String> relateFileMap = gson.fromJson(relateFiles, Map.class);
            fileInfoMap.put(LoaderConstant.File_Relate_Remote_Paths, relateFileMap);
        }
    }

    /** Seeds the per-file redis state (counter + metadata hash), all with the configured TTL. */
    private void createRedisCache(ResolverMessage resolverMessage, String filePath, String fileId, long startTimeMillis) {
        redisTemplate.opsForValue().set(fileId + ":dataCount", 0L, redisKeyExpireTime, TimeUnit.MILLISECONDS);
        redisTemplate.opsForHash().put(fileId + ":cache", "filePath", filePath);
        redisTemplate.opsForHash().put(fileId + ":cache", "startTimeMillis", startTimeMillis);
        redisTemplate.opsForHash().put(fileId + ":cache", "resolverMessage", gson.toJson(resolverMessage));
        redisTemplate.expire(fileId + ":cache", redisKeyExpireTime, TimeUnit.MILLISECONDS);
    }

    /** Deletes all per-file monitoring keys except ":relateFiles" (still needed by the timeout path). */
    private void removeRedisCache(String fileId) {
        redisTemplate.delete(fileId + ":dataCount");
        redisTemplate.delete(fileId + ":data");
        redisTemplate.delete(fileId + ":cache");
        redisTemplate.delete(fileId + ":flag");
        redisTemplate.delete(fileId + ":error");
        redisTemplate.delete(fileId + ":errorHandled");
    }

    /**
     * Records that a store message was dispatched: increments the in-flight
     * counter and appends "topic_partition_offset" to the ":data" list.
     */
    @Override
    public void dataStoreHandle(String topic, int partition, long offset, String fileId) {
        redisTemplate.opsForValue().increment(fileId + ":dataCount");
        StringBuilder value = new StringBuilder();
        value.append(topic).append("_").append(partition).append("_").append(offset);
        redisTemplate.opsForList().leftPush(fileId + ":data", value.toString());
        redisTemplate.expire(fileId + ":data", redisKeyExpireTime, TimeUnit.MILLISECONDS);
    }

    /**
     * Records that a store message finished (successfully or not): removes its
     * "topic_partition_offset" entry and decrements the in-flight counter.
     */
    @Override
    public void dataStoreSuccessHandle(String topic, int partition, long offset, String fileId) {
        StringBuilder data = new StringBuilder();
        data.append(topic).append("_").append(partition).append("_").append(offset);
        redisTemplate.opsForList().remove(fileId + ":data", 0, data.toString());
        redisTemplate.opsForValue().decrement(fileId + ":dataCount");
    }

    /**
     * Marks the file as fully parsed by setting the ":flag" key. Two cases:
     * 1. the file parsed normally and is waiting for data to be stored;
     * 2. parsing failed and is being terminated (called via {@link #errorHandle}).
     *
     * @param flagMsg commit/rollback handle passed to the resolver later; null is stored as ""
     */
    @Override
    public void completedHandle(String fileId, String flagMsg) {
        redisTemplate.opsForValue().set(fileId + ":flag", flagMsg == null ? "" : flagMsg, redisKeyExpireTime, TimeUnit.MILLISECONDS);
    }

    /**
     * Marks the file as failed by setting the "{fileId}:error" key, optionally
     * also marking it completed so the monitor loop can run the rollback.
     */
    @Override
    public void errorHandle(String fileId, String errorMsg, boolean completed, String flagMsg) {
        redisTemplate.opsForValue().set(fileId + ":error", errorMsg, redisKeyExpireTime, TimeUnit.MILLISECONDS);
        if (completed) {
            completedHandle(fileId, flagMsg);
        }
    }

    /**
     * Registers a related file (remote path → local path) in the ":relateFiles"
     * JSON map; the monitor later uses it to move or delete those files.
     */
    @Override
    public void relatedFilesHandle(String fileId, String fileRemotePath, String fileLocalPath) {
        Map<String, String> relateFileMap = new HashMap<>();
        if (hasKey(fileId + ":relateFiles")) {
            String relateFiles = (String) redisTemplate.opsForValue().get(fileId + ":relateFiles");
            relateFileMap = gson.fromJson(relateFiles, Map.class);
        }
        relateFileMap.put(fileRemotePath, fileLocalPath);
        // TTL added: this key previously had no expiry and leaked in redis.
        redisTemplate.opsForValue().set(fileId + ":relateFiles", gson.toJson(relateFileMap), redisKeyExpireTime, TimeUnit.MILLISECONDS);
    }

    /** @return true if an error has been reported for the given file. */
    @Override
    public boolean checkHasError(String fileId) {
        return hasKey(fileId + ":error");
    }

    /**
     * Manually commits the given kafka offsets (offset + 1, so each listed record
     * is marked consumed) to cancel the remaining store work for a failed file,
     * then clears the corresponding redis bookkeeping.
     *
     * @param topicPartitionOffsets entries of the form "topic_partition_offset"
     */
    private void stopDataStoreByKafkaOffset(FileResolver fileResolver, String fileId, List<String> topicPartitionOffsets) {
        String groupId = fileResolver.getDataStoreConsumerGroupId();
        if (StringUtils.isBlank(groupId)) {
            throw new RuntimeException("获取kafkaGroupId为空，请检查getDataStoreConsumerGroupId()方法的返回值！");
        }
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        for (String topicPartitionOffset : topicPartitionOffsets) {
            // Parse from the right: topic names may themselves contain '_',
            // so a plain split("_") would mis-read the topic.
            int offsetSep = topicPartitionOffset.lastIndexOf('_');
            int partitionSep = topicPartitionOffset.lastIndexOf('_', offsetSep - 1);
            String topic = topicPartitionOffset.substring(0, partitionSep);
            int partition = Integer.parseInt(topicPartitionOffset.substring(partitionSep + 1, offsetSep));
            long offset = Long.parseLong(topicPartitionOffset.substring(offsetSep + 1));
            offsets.put(new TopicPartition(topic, partition), new OffsetAndMetadata(offset + 1));
        }
        // try-with-resources: the consumer used to be leaked. commitSync replaces
        // commitAsync because async commit callbacks are only delivered during
        // poll()/close(), which were never invoked — the follow-up bookkeeping
        // never ran. subscribe() was dropped: without poll() it never joins the
        // group, and committing explicit offsets does not require a subscription.
        try (Consumer consumer = consumerFactory.createConsumer(groupId, null)) {
            consumer.commitSync(offsets);
            log.info("手动提交kafka中消息结束。");
            offsets.forEach((tp, offset) -> dataStoreSuccessHandle(tp.topic(), tp.partition(), offset.offset(), fileId));
        }
    }

}
