package com.example.demo.config;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.listener.BatchErrorHandler;
import org.springframework.stereotype.Component;

/**
 * Custom batch error handler.
 *
 * <p>Works together with {@code KafkaOffsetSkipAspect} to provide finer-grained
 * error-handling control: it logs the failed batch and then rethrows the
 * exception so the aspect can decide (based on configuration) whether the
 * offending messages/offsets should be skipped.
 */
@Component
public class CustomBatchErrorHandler implements BatchErrorHandler {
    
    private static final Logger logger = LoggerFactory.getLogger(CustomBatchErrorHandler.class);
    
    /**
     * Logs the failed batch (one summary line plus one detail line per record)
     * and rethrows the exception so upstream handling ({@code KafkaOffsetSkipAspect})
     * can react to it.
     *
     * @param thrownException the exception raised while processing the batch;
     *                        rethrown as-is if it is a {@link RuntimeException},
     *                        otherwise wrapped in one (original preserved as cause)
     * @param data            the records belonging to the failed batch
     */
    @Override
    public void handle(Exception thrownException, ConsumerRecords<?, ?> data) {
        
        // Pass the exception itself as the trailing argument so SLF4J also logs
        // the full stack trace — getMessage() alone may be null and always loses
        // the cause chain.
        logger.warn("[CustomBatchErrorHandler] 批量消息处理异常，记录数量: {}, 异常: {}", 
                   data.count(), thrownException.getMessage(), thrownException);
        
        // Log details of every record in the failed batch for later diagnosis.
        for (ConsumerRecord<?, ?> record : data) {
            logger.warn("[CustomBatchErrorHandler] 失败消息详情: topic={}, partition={}, offset={}, key={}, value={}", 
                       record.topic(), record.partition(), record.offset(), record.key(), record.value());
        }
        
        // Retries are intentionally NOT handled here; the exception is propagated
        // so KafkaOffsetSkipAspect can decide whether to skip these messages.
        logger.info("[CustomBatchErrorHandler] 将异常传递给KafkaOffsetSkipAspect处理");
        
        // Rethrow for upstream handling, wrapping checked exceptions so the
        // interface signature (which declares no checked exceptions) is honored.
        if (thrownException instanceof RuntimeException) {
            throw (RuntimeException) thrownException;
        } else {
            throw new RuntimeException("批量消息处理失败", thrownException);
        }
    }
}
