package com.hefei.garden.service.impl;

import com.alibaba.cloud.commons.lang.StringUtils;
import com.hefei.garden.config.kafka.config.InitConfig;
import com.hefei.garden.config.kafka.config.KafkaTopicStrategyContext;
import com.hefei.garden.config.redis.RedisUtils;
import com.hefei.garden.pojo.KafkaBaseDto;
import com.hefei.garden.service.KafkaConsumerService;
import org.apache.commons.compress.utils.Lists;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.stereotype.Service;
import org.springframework.util.CollectionUtils;

import java.time.Duration;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.TimeUnit;

import static java.util.concurrent.Executors.newFixedThreadPool;

/**
 * Manages the lifecycle of partition-level Kafka consumers: start/stop a dedicated
 * consumer thread per topic/group/partition, coordinated via status flags in Redis.
 *
 * @author devil
 * @date 2022/8/21
 */
@Service
public class KafkaConsumerServiceImpl implements KafkaConsumerService {

    private static final Logger LOGGER = LoggerFactory.getLogger(KafkaConsumerServiceImpl.class);

    /** Consumer lifecycle states persisted in Redis under the topic/group/partition unique key. */
    private static final String STATUS_RUNNING = "running";
    // NOTE(review): value is misspelled ("stoping") but is persisted in Redis and read by
    // running consumer threads — changing it would break coordination, so it is kept as-is.
    private static final String STATUS_STOPING = "stoping";
    private static final String STATUS_STOP = "stop";

    /** Max number of 100 ms polls (20 s total) to wait for the consumer thread to report RUNNING. */
    private static final int START_MAX_RETRIES = 200;

    /**
     * Lists the partition ids of the topic named in {@code kafkaBaseDto}.
     *
     * @param kafkaBaseDto connection config plus the topic to inspect
     * @return partition ids of the topic, or an empty list when the topic does not exist
     */
    @Override
    public List<Integer> getKafkaTopicPartition(KafkaBaseDto kafkaBaseDto) {
        Properties properties = InitConfig.initConsumerConfig(kafkaBaseDto);
        // try-with-resources: the original leaked this consumer (it was never closed),
        // leaving broker connections and internal threads alive after each call.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties)) {
            // Fetch partition metadata for every topic visible to this consumer.
            Map<String, List<PartitionInfo>> topics = consumer.listTopics();
            List<PartitionInfo> topicPartition = topics.get(kafkaBaseDto.getTopic());
            if (CollectionUtils.isEmpty(topicPartition)) {
                return Lists.newArrayList();
            }
            List<Integer> list = Lists.newArrayList();
            for (PartitionInfo partitionInfo : topicPartition) {
                list.add(partitionInfo.partition());
            }
            return list;
        }
    }

    /**
     * Starts a dedicated consumer thread for the topic/group/partition in {@code kafkaBaseDto},
     * then waits (up to 20 s) for the thread to mark itself RUNNING in Redis.
     *
     * @param kafkaBaseDto target topic, group id and partition id
     * @return "start repeat" if already running, "no topic information" / "no topic partitionId"
     *         on bad input, "success" once the consumer reports RUNNING, "error" on timeout
     */
    @Override
    public synchronized String startKafkaDataPull(KafkaBaseDto kafkaBaseDto) {
        // Redis key used to track whether the consumer for this group/topic/partition is alive.
        String topicGroupIdPartitionIdKey = kafkaBaseDto.getUniqueKey();
        // Duplicate start: a consumer for this key is already running.
        if (StringUtils.equals(RedisUtils.get(topicGroupIdPartitionIdKey), STATUS_RUNNING)) {
            return "start repeat";
        }
        List<Integer> kafkaTopicPartition = this.getKafkaTopicPartition(kafkaBaseDto);
        // Unknown topic.
        if (CollectionUtils.isEmpty(kafkaTopicPartition)) {
            return "no topic information";
        }
        int partitionId = kafkaBaseDto.getPartitionId();
        // Partition id not present in the topic.
        if (!kafkaTopicPartition.contains(partitionId)) {
            return "no topic partitionId";
        }
        String topic = kafkaBaseDto.getTopic();
        String groupId = kafkaBaseDto.getGroupId();
        // One thread per consumer; total thread count = sum of partitions across started topics.
        ExecutorService queryExecutor = newFixedThreadPool(1);
        queryExecutor.execute(new SingleConsumer(new KafkaBaseDto(topic, groupId, partitionId), InitConfig.initConsumerConfig(kafkaBaseDto)));
        // Let the pool terminate once the consumer loop exits. The original never shut the
        // executor down, leaking one idle pool thread per start call.
        queryExecutor.shutdown();
        // Poll Redis until the worker flags itself RUNNING, or give up after 20 s.
        int retry = 0;
        boolean flag = true;
        while (!StringUtils.equals(RedisUtils.get(topicGroupIdPartitionIdKey), STATUS_RUNNING)) {
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (InterruptedException e) {
                // Restore interrupt status instead of swallowing it, and stop waiting.
                Thread.currentThread().interrupt();
                flag = false;
                break;
            }
            if (retry++ >= START_MAX_RETRIES) {
                flag = false;
                break;
            }
        }
        return flag ? "success" : "error";
    }

    /**
     * One consumer per thread: polls a single assigned topic-partition until the Redis
     * status flag for its unique key leaves RUNNING, then commits and closes.
     */
    static class SingleConsumer implements Runnable {

        private final KafkaBaseDto kafkaBaseDto;
        private final Properties properties;

        public SingleConsumer(KafkaBaseDto kafkaBaseDto, Properties properties) {
            this.kafkaBaseDto = kafkaBaseDto;
            this.properties = properties;
        }

        @Override
        public void run() {
            KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);
            String topic = kafkaBaseDto.getTopic();
            int partitionId = kafkaBaseDto.getPartitionId();
            String topicGroupIdPartitionKey = kafkaBaseDto.getUniqueKey();
            // Manual assignment of exactly one partition (no group-managed subscription).
            consumer.assign(Collections.singletonList(new TopicPartition(topic, partitionId)));
            try {
                RedisUtils.set(topicGroupIdPartitionKey, STATUS_RUNNING);
                // Strategy context picks the per-topic business handler.
                KafkaTopicStrategyContext kafkaTopicStrategyContext = new KafkaTopicStrategyContext(kafkaBaseDto);
                out:
                while (StringUtils.equals(RedisUtils.get(topicGroupIdPartitionKey), STATUS_RUNNING)) {
                    // Core receive loop; 1 s poll timeout so the Redis flag is re-checked regularly.
                    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                    for (ConsumerRecord<String, String> record : records) {
                        try {
                            // Strategy pattern: per-topic business processing.
                            kafkaTopicStrategyContext.businessProcess(record);
                        } catch (Exception e) {
                            // Log the cause as well — the original dropped the exception,
                            // losing the stack trace.
                            LOGGER.error("数据消费错误, 具体信息为: {}", record, e);
                            // Abort this consumer on the first failed record.
                            break out;
                        }
                    }
                }
            } finally {
                try {
                    // Final async commit of processed offsets.
                    consumer.commitAsync();
                } finally {
                    // Close the consumer (original leaked it) — close() also flushes
                    // any pending commit before releasing broker connections.
                    consumer.close();
                    // Publish the terminal state so stopKafkaDataPull can return.
                    RedisUtils.set(topicGroupIdPartitionKey, STATUS_STOP);
                }
            }
            String groupId = kafkaBaseDto.getGroupId();
            LOGGER.info("topic={},group-id={},partition={} 的消费者停止消费", topic, groupId, partitionId);
        }
    }

    /**
     * Requests the consumer for the given topic/group/partition to stop, then waits until
     * its thread acknowledges by writing STOP to Redis.
     *
     * @param kafkaBaseDto target topic, group id and partition id
     * @return "success" once stopped (or if it was never running)
     */
    @Override
    public synchronized String stopKafkaDataPull(KafkaBaseDto kafkaBaseDto) {
        String topicGroupIdPartitionKey = kafkaBaseDto.getUniqueKey();
        String topicConsumerInfo = RedisUtils.get(topicGroupIdPartitionKey);
        // Nothing to stop: no status recorded, or already stopped.
        if (StringUtils.isBlank(topicConsumerInfo) || StringUtils.equals(topicConsumerInfo, STATUS_STOP)) {
            return "success";
        }
        RedisUtils.set(topicGroupIdPartitionKey, STATUS_STOPING);
        // Poll with a short sleep — the original while(true) busy-spun on Redis with no
        // pause, pegging a CPU core until the consumer thread flipped the flag.
        while (!StringUtils.equals(RedisUtils.get(topicGroupIdPartitionKey), STATUS_STOP)) {
            try {
                TimeUnit.MILLISECONDS.sleep(100);
            } catch (InterruptedException e) {
                // Stop request is already published; restore interrupt status and return.
                Thread.currentThread().interrupt();
                break;
            }
        }
        return "success";
    }
}
