package com.qf.kafka.spring.boot.demo.gpt.service;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.header.Header;
import org.apache.kafka.common.header.Headers;
import org.apache.kafka.common.header.internals.RecordHeader;
import org.apache.kafka.common.header.internals.RecordHeaders;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Service;

import java.nio.charset.StandardCharsets;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;

/**
 * Kafka consumer service: batch-consumes records with manual offset commit,
 * performs (placeholder) idempotent processing keyed on a {@code message_id}
 * header, and forwards unprocessable records to a dead-letter topic.
 */
@Service
public class KafkaConsumerService {

    private static final Logger logger = LoggerFactory.getLogger(KafkaConsumerService.class);

    /** Used to forward failed records to the dead-letter topic. */
    private final KafkaTemplate<String, String> kafkaTemplate;

    public KafkaConsumerService(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    /**
     * Batch listener. Processes each record, then commits offsets manually via
     * {@link Acknowledgment#acknowledge()}. On failure, the records are routed
     * to the dead-letter topic and the batch is still acknowledged so a poison
     * message cannot be redelivered indefinitely.
     *
     * @param records        the polled batch
     * @param acknowledgment manual-commit handle supplied by the container
     */
    @KafkaListener(topics = "topic_name", containerFactory = "kafkaListenerContainerFactory")
    public void listen(List<ConsumerRecord<String, String>> records, Acknowledgment acknowledgment) {
        try {
            for (ConsumerRecord<String, String> record : records) {
                logger.info("Received message: {}", record.value());
                // Message handling (intended to be idempotent; see processMessage)
                processMessage(record);
            }
            acknowledgment.acknowledge(); // manual offset commit for the whole batch
        } catch (Exception e) {
            logger.error("Error processing batch of {} records", records.size(), e);
            // Forward each record's payload individually — sending
            // records.toString() would dump the List's string form, which a
            // DLT consumer cannot replay.
            for (ConsumerRecord<String, String> record : records) {
                kafkaTemplate.send("dead_letter_topic", record.key(), record.value());
            }
            // Acknowledge anyway: the payloads are preserved in the DLT, and
            // withholding the commit would redeliver the same failing batch forever.
            acknowledgment.acknowledge();
        }
    }

    /**
     * Example idempotency hook: extracts the {@code message_id} header so a
     * persistent store can be checked for already-processed ids. Records
     * without the header are logged and skipped from deduplication.
     */
    private void processMessage(ConsumerRecord<String, String> record) {
        Header idHeader = record.headers().lastHeader("message_id");
        if (idHeader == null || idHeader.value() == null) {
            // lastHeader() returns null when the header is absent; guard
            // against the NPE instead of failing the whole batch.
            logger.warn("Record {}-{}@{} has no message_id header; skipping dedup",
                    record.topic(), record.partition(), record.offset());
            return;
        }
        // Decode the raw header bytes explicitly: calling toString() on a
        // byte[] yields only the array's identity hash (e.g. "[B@1a2b3c"),
        // never the actual id.
        String messageId = new String(idHeader.value(), StandardCharsets.UTF_8);
        // Check the persistent store for messageId: skip if already processed,
        // otherwise process and record the id.
    }

    /**
     * Demo: builds a standalone producer and sends one record carrying the
     * {@code message_id} header that {@link #processMessage} expects to read.
     */
    public void sendIncludeHeadersDemo() {
        Properties props = new Properties();
        props.put("bootstrap.servers", "localhost:9092");
        props.put("acks", "all");
        props.put("retries", 0);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // try-with-resources guarantees the producer is closed (flushing any
        // buffered records) even when send() throws; the original leaked the
        // producer on failure.
        try (Producer<String, String> producer = new KafkaProducer<>(props)) {
            // Attach the message_id header used downstream for deduplication.
            Headers headers = new RecordHeaders();
            headers.add(new RecordHeader("message_id",
                    "unique_message_id".getBytes(StandardCharsets.UTF_8)));

            // null partition lets the default partitioner pick one from the key
            ProducerRecord<String, String> record =
                    new ProducerRecord<>("my-topic", null, "my-key", "my-value", headers);
            producer.send(record);
        }
    }
}
