package com.example.kafka.service;

import com.example.kafka.config.KafkaConfig;
import com.example.kafka.model.Message;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import lombok.RequiredArgsConstructor;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Service;

@Slf4j
@Service
@RequiredArgsConstructor
public class KafkaConsumerService {

    /** Shared, thread-safe Jackson mapper injected by Spring (constructor-injected via Lombok). */
    private final ObjectMapper objectMapper;

    /**
     * Consumes plain String messages from {@link KafkaConfig#TOPIC_TEST}.
     *
     * <p>Offsets are committed manually ({@code enable.auto.commit=false}): the offset is
     * acknowledged only after {@link #processMessage(String)} completes, so a processing
     * failure leaves the offset uncommitted and the record eligible for redelivery.
     *
     * @param message   raw message payload
     * @param topic     topic the record was read from
     * @param partition partition the record was read from
     * @param offset    record offset within the partition
     * @param ack       manual-commit handle for the current record
     */
    @KafkaListener(
        topics = KafkaConfig.TOPIC_TEST,
        groupId = KafkaConfig.TOPIC_GROUP,
        containerFactory = "kafkaListenerContainerFactory",
        id = "simpleMessageListener",
        autoStartup = "true",
        properties = {
            "bootstrap.servers=${spring.kafka.bootstrap-servers}",
            "auto.offset.reset=earliest",
            "enable.auto.commit=false"
        }
    )
    public void consumeMessage(
            @Payload String message,
            @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
            @Header(KafkaHeaders.RECEIVED_PARTITION) int partition,
            @Header(KafkaHeaders.OFFSET) long offset,
            Acknowledgment ack
    ) {
        try {
            log.info("【消费简单消息】收到消息: topic = {}, partition = {}, offset = {}, message = {}",
                topic, partition, offset, message);

            processMessage(message);

            // Commit the offset only after successful processing (at-least-once semantics).
            ack.acknowledge();
        } catch (Exception e) {
            log.error("消息处理失败: {}", message, e);
            // Deliberately NOT acknowledged: the uncommitted offset allows redelivery of this
            // record (retry). NOTE(review): without a retry limit / dead-letter topic a
            // permanently failing record can loop forever — consider a DeadLetterPublishingRecoverer.
        }
    }

    /**
     * Consumes JSON-encoded {@link Message} objects from {@link KafkaConfig#TOPIC_TEST}
     * using a separate consumer group ({@code TOPIC_GROUP + "-json"}).
     *
     * <p>Deserialization failures are non-retriable (the same bytes will always fail), so
     * such records are logged and acknowledged to avoid an infinite redelivery loop.
     * Business-processing failures are left unacknowledged so the record can be retried.
     *
     * @param consumerRecord full Kafka record (key + JSON value)
     * @param ack            manual-commit handle for the current record
     */
    @KafkaListener(
        topics = KafkaConfig.TOPIC_TEST,
        groupId = KafkaConfig.TOPIC_GROUP + "-json",
        containerFactory = "kafkaListenerContainerFactory",
        id = "jsonMessageListener",
        autoStartup = "true",
        properties = {
            "bootstrap.servers=${spring.kafka.bootstrap-servers}",
            "auto.offset.reset=earliest",
            "enable.auto.commit=false"
        }
    )
    public void consumeMessageObject(ConsumerRecord<String, String> consumerRecord, Acknowledgment ack) {
        String key = consumerRecord.key();
        String value = consumerRecord.value();

        try {
            Message message = objectMapper.readValue(value, Message.class);
            log.info("【消费消息对象】收到消息: key = {}, message = {}", key, message);

            processMessageObject(message);

            // Commit the offset only after successful processing (at-least-once semantics).
            ack.acknowledge();
        } catch (JsonProcessingException e) {
            log.error("消息反序列化失败: {}", value, e);
            // A malformed payload will never deserialize on retry; acknowledge it so this
            // poison-pill record is skipped instead of being re-consumed forever.
            // Ideally it would also be forwarded to a dead-letter topic.
            ack.acknowledge();
        } catch (Exception e) {
            log.error("消息对象处理失败: {}", value, e);
            // Deliberately NOT acknowledged: leave the offset uncommitted so the record
            // can be retried; route to a dead-letter topic once a retry limit is reached.
        }
    }

    /**
     * Business logic for plain String messages. Currently a logging placeholder.
     *
     * @param message raw message payload
     */
    private void processMessage(String message) {
        log.info("正在处理消息: {}", message);
    }

    /**
     * Business logic for deserialized {@link Message} objects. Currently a logging placeholder.
     *
     * @param message deserialized message object
     */
    private void processMessageObject(Message message) {
        log.info("正在处理消息对象: {}", message);
    }
}