package com.ajing.logextractor.consumer;

import ch.qos.logback.classic.LoggerContext;
import com.ajing.logextractor.config.CommonHandlerConfig;
import com.ajing.logextractor.config.YamlConfigLoader;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.stream.Collectors;

@Slf4j
public class LogConsumer {

    private static final int DEFAULT_POLL_RECORDS = 50000;
    private static final int DEFAULT_MAX_BYTES = 1048576000;
    private static final int DEFAULT_MIN_BYTES = 10485760;
    private static final int DEFAULT_MAX_WAIT_MS = 1000;
    private static final int DEFAULT_PATCH_SIZE = 100;
    private static final int DEFAULT_POLL_TIME_OUT = 1000;

    /**
     * Builds a {@link KafkaConsumer} from the YAML configuration, subscribes to the
     * configured topic and enters an endless poll loop, dispatching record values to
     * every configured {@link LogMessageHandler} in batches of {@code patchSize}.
     * <p>
     * Offsets are committed manually, and only after a whole poll's worth of records
     * has been handled successfully — a failed batch is NOT acknowledged, so records
     * are redelivered on the next poll (at-least-once semantics).
     *
     * @throws RuntimeException if the Kafka properties, the topic, or the handler
     *                          list is missing from the configuration
     */
    public static void consume() {
        Map<String, Object> kafkaProperties = YamlConfigLoader.getConfig().getKafkaProperties();
        if (kafkaProperties == null) {
            log.error("kafka配置不存在");
            throw new RuntimeException("kafka配置不存在");
        }
        // Disable auto commit: offsets are committed explicitly after each
        // successfully handled batch (see the poll loop below).
        kafkaProperties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

        // Keys and values are consumed as plain strings.
        kafkaProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        kafkaProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);

        // Poll/fetch sizing, each falling back to a default when not configured.
        kafkaProperties.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG,
                orDefault(YamlConfigLoader.getConfig().getPollRecords(), DEFAULT_POLL_RECORDS));
        kafkaProperties.put(ConsumerConfig.FETCH_MAX_BYTES_CONFIG,
                orDefault(YamlConfigLoader.getConfig().getMaxBytes(), DEFAULT_MAX_BYTES));
        kafkaProperties.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG,
                orDefault(YamlConfigLoader.getConfig().getMinBytes(), DEFAULT_MIN_BYTES));
        kafkaProperties.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG,
                orDefault(YamlConfigLoader.getConfig().getMaxWaitMs(), DEFAULT_MAX_WAIT_MS));

        // Validate the topic BEFORE dereferencing it: the original
        // get("topic").toString() threw an NPE when the key was absent,
        // which made the null check below unreachable.
        Object topic = kafkaProperties.get("topic");
        if (topic == null || topic.toString().isEmpty()) {
            log.error("未配置消息主题");
            throw new RuntimeException("未配置消息主题");
        }

        // Fail fast on a missing handler list before opening the consumer.
        List<CommonHandlerConfig> handlerConfigs = YamlConfigLoader.getConfig().getHandlers();
        if (handlerConfigs == null || handlerConfigs.isEmpty()) {
            log.error("未配置消费处理器");
            throw new RuntimeException("未配置消费处理器");
        }
        // A single LoggerContext is shared by all handlers.
        LoggerContext loggerContext = new LoggerContext();
        List<LogMessageHandler> logMessageHandlers = handlerConfigs.stream()
                .map(config -> new LogMessageHandler(config, loggerContext))
                .collect(Collectors.toList());

        int patchSize = orDefault(YamlConfigLoader.getConfig().getPatchSize(), DEFAULT_PATCH_SIZE);
        int pullTimeOut = orDefault(YamlConfigLoader.getConfig().getPullTimeOut(), DEFAULT_POLL_TIME_OUT);

        // Create the consumer instance and subscribe to the configured topic.
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(kafkaProperties);
        consumer.subscribe(Collections.singletonList(topic.toString()));

        // Endless poll loop: read from the broker, dispatch in batches, then commit.
        while (true) {
            try {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(pullTimeOut));
                List<String> recordList = new ArrayList<>(patchSize);
                for (ConsumerRecord<String, String> record : records) {
                    recordList.add(record.value());
                    if (recordList.size() == patchSize) {
                        handleRecords(logMessageHandlers, recordList);
                        recordList.clear();
                    }
                }
                if (!recordList.isEmpty()) {
                    handleRecords(logMessageHandlers, recordList);
                }
                // Commit only after the whole poll was handled successfully.
                // The original committed in a finally block, which acknowledged
                // offsets even when processing threw — silently losing messages.
                consumer.commitSync();
            } catch (Exception e) {
                // Log and keep consuming; uncommitted records will be redelivered.
                log.error("处理消息异常", e);
            }
        }
    }

    /**
     * Returns {@code configured} when present, otherwise {@code fallback}.
     * Collapses the repeated null-to-default pattern for optional int settings.
     */
    private static int orDefault(Integer configured, int fallback) {
        return configured != null ? configured : fallback;
    }

    /**
     * Dispatches one batch of raw record values to every registered handler,
     * in registration order.
     */
    private static void handleRecords(List<LogMessageHandler> handlers, List<String> records) {
        handlers.forEach(handler -> handler.handle(records));
    }

}
