package org.pr.kafkaexpress.consumer;

import java.util.List;

import lombok.extern.slf4j.Slf4j;
import org.pr.kafkaexpress.constants.KafkaConstants;
import org.pr.kafkaexpress.exception.KafkaException;
import org.pr.kafkaexpress.util.MessageUtils;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.KafkaHeaders;
import org.springframework.messaging.handler.annotation.Header;
import org.springframework.messaging.handler.annotation.Payload;
import org.springframework.stereotype.Component;

/**
 * Kafka message consumer providing single-message and batch listeners.
 *
 * @author LXC
 * @since 2025/7/10
 */
@Slf4j
@Component
public class MessageConsumer {

    /**
     * 监听单个消息
     * 
     * @param message 消息内容
     * @param topic 主题
     * @param partition 分区
     * @param offset 偏移量
     */
    @KafkaListener(topics = KafkaConstants.TEST_TOPIC, groupId = KafkaConstants.TEST_GROUP)
    public void listen(@Payload String message,
                      @Header(KafkaHeaders.RECEIVED_TOPIC) String topic,
                      @Header(KafkaHeaders.RECEIVED_PARTITION) int partition,
                      @Header(KafkaHeaders.OFFSET) long offset) {
        try {
            log.info("Received message from topic: {}, partition: {}, offset: {}, message: {}", 
                    topic, partition, offset, MessageUtils.formatMessageForLog(message, 100));
            
            // 处理消息的业务逻辑
            processMessage(message);
            
        } catch (Exception e) {
            log.error("Error processing message from topic: {}, partition: {}, offset: {}, message: {}", 
                    topic, partition, offset, MessageUtils.formatMessageForLog(message, 100), e);
            // 这里可以添加重试逻辑或死信队列处理
            throw new KafkaException(topic, message, e);
        }
    }

    /**
     * 批量监听消息
     * 
     * @param messages 消息列表
     */
    @KafkaListener(topics = KafkaConstants.BATCH_TOPIC, groupId = KafkaConstants.BATCH_GROUP, batch = "true")
    public void listenBatch(@Payload java.util.List<String> messages) {
        try {
            log.info("Received batch of {} messages", messages.size());
            
            for (String message : messages) {
                processMessage(message);
            }
            
            log.info("Successfully processed batch of {} messages", messages.size());
            
        } catch (Exception e) {
            log.error("Error processing batch of {} messages", messages.size(), e);
            throw new RuntimeException("Failed to process message batch", e);
        }
    }

    /**
     * 处理消息的业务逻辑
     * 
     * @param message 消息内容
     */
    private void processMessage(String message) {
        // 这里添加具体的业务处理逻辑
        log.info("Processing message: {}", MessageUtils.formatMessageForLog(message, 100));
        
        // 验证消息
        if (!MessageUtils.isValidMessage(message)) {
            log.warn("Received invalid message: {}", MessageUtils.formatMessageForLog(message, 100));
            return;
        }
        
        // 检查是否为心跳消息
        if (MessageUtils.isHeartbeatMessage(message)) {
            log.debug("Received heartbeat message");
            return;
        }
        
        // 示例：简单的消息处理
        try {
            // 可以在这里添加数据库操作、业务逻辑处理等
            String messageHash = MessageUtils.calculateMessageHash(message);
            log.info("Message processed successfully: {}, hash: {}", 
                    MessageUtils.formatMessageForLog(message, 100), messageHash);
        } catch (Exception e) {
            log.error("Error processing message: {}", MessageUtils.formatMessageForLog(message, 100), e);
            throw new KafkaException("Failed to process message", e);
        }
    }
}
