package com.innodealing.offsetskip.aspect;

import com.innodealing.offsetskip.annotation.KafkaOffsetSkip;
import com.innodealing.offsetskip.handler.KafkaOffsetSkipHandler;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.aspectj.lang.ProceedingJoinPoint;
import org.aspectj.lang.annotation.Around;
import org.aspectj.lang.annotation.Aspect;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.kafka.support.KafkaUtils;

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ConcurrentHashMap;

/**
 * AOP aspect implementing Kafka offset skipping.
 *
 * <p>Intercepts listener methods annotated with {@link KafkaOffsetSkip}. When
 * consumption fails, the configured {@link KafkaOffsetSkipHandler} decides
 * whether the failing message(s) should be skipped; if so, the offset is
 * acknowledged automatically (when an {@link Acknowledgment} argument is
 * present). Both single-record and batch listener signatures are supported.</p>
 */
@Aspect
public class KafkaOffsetSkipAspect {
    private static final Logger logger = LoggerFactory.getLogger(KafkaOffsetSkipAspect.class);

    // Per-partition skip bookkeeping, kept fine-grained for concurrency.
    // Key format: topic:groupId:partition (see buildPartitionKey).
    private static final ConcurrentHashMap<String, Long> partitionSkipOffsets = new ConcurrentHashMap<>();

    /** Strategy deciding whether a given (groupId, topic, partition, offset) should be skipped. */
    private final KafkaOffsetSkipHandler handler;

    public KafkaOffsetSkipAspect(KafkaOffsetSkipHandler handler) {
        this.handler = handler;
    }

    /**
     * Around advice for {@link KafkaOffsetSkip}-annotated listener methods.
     *
     * @param joinPoint       the intercepted listener invocation
     * @param kafkaOffsetSkip the annotation instance bound by the pointcut
     * @return the listener's return value, or {@code null} when the message(s) were skipped
     * @throws Throwable rethrown from the listener when the handler does not allow skipping
     */
    @Around("@annotation(kafkaOffsetSkip)")
    public Object aroundKafkaListener(ProceedingJoinPoint joinPoint, KafkaOffsetSkip kafkaOffsetSkip) throws Throwable {
        // Extract the record(s) and acknowledgment from the method arguments;
        // both single-record and batch signatures are recognized.
        MessageContext context = extractMessageContext(joinPoint.getArgs());
        if (context.isEmpty()) {
            logger.warn("[KafkaOffsetSkipper] 参数中未找到ConsumerRecord或List<ConsumerRecord>");
            return joinPoint.proceed();
        }

        // Dispatch on listener shape; skip checks happen inside each branch.
        if (context.isBatch()) {
            return handleBatchMessages(joinPoint, context);
        } else {
            return handleSingleMessage(joinPoint, context);
        }
    }

    /**
     * Extracts the message context from the listener arguments, supporting both a
     * single {@link ConsumerRecord} and a {@code List<ConsumerRecord>} batch, plus
     * an optional {@link Acknowledgment}.
     */
    @SuppressWarnings("unchecked")
    private MessageContext extractMessageContext(Object[] args) {
        ConsumerRecord<?, ?> singleRecord = null;
        List<ConsumerRecord<?, ?>> batchRecords = null;
        Acknowledgment acknowledgment = null;

        for (Object arg : args) {
            if (arg instanceof ConsumerRecord && singleRecord == null) {
                singleRecord = (ConsumerRecord<?, ?>) arg;
            } else if (arg instanceof List && batchRecords == null) {
                // Only treat the list as a batch if its first element is a ConsumerRecord.
                List<?> list = (List<?>) arg;
                if (!list.isEmpty() && list.get(0) instanceof ConsumerRecord) {
                    batchRecords = (List<ConsumerRecord<?, ?>>) list;
                }
            } else if (arg instanceof Acknowledgment && acknowledgment == null) {
                acknowledgment = (Acknowledgment) arg;
            }

            // Early exit once both the message(s) and the acknowledgment are found.
            if ((singleRecord != null || batchRecords != null) && acknowledgment != null) {
                break;
            }
        }

        return new MessageContext(singleRecord, batchRecords, acknowledgment);
    }

    /**
     * Handles a single-record listener invocation: skips the record up front when
     * the partition is in skip mode, otherwise proceeds and falls back to the
     * exception-time skip check.
     */
    private Object handleSingleMessage(ProceedingJoinPoint joinPoint, MessageContext context) throws Throwable {
        ConsumerRecord<?, ?> record = context.singleRecord;
        String topic = record.topic();
        String groupId = KafkaUtils.getConsumerGroupId();
        final int partition = record.partition();
        final long offset = record.offset();
        final String partitionKey = buildPartitionKey(topic, groupId, partition);

        // Pre-invocation skip check (normal processing path).
        if (shouldSkipMessage(partitionKey, groupId, topic, partition, offset)) {
            logSkippedMessage(groupId, topic, partition, offset);
            cleanupIfLastMessage(partitionKey, groupId, topic, partition, offset);
            return null;
        }

        try {
            return joinPoint.proceed();
        } catch (Exception e) {
            // On failure, ask the handler whether this offset may be skipped.
            return handleSingleException(e, context, partitionKey, groupId, topic, partition, offset);
        }
    }

    /**
     * Handles a batch listener invocation with precise filtering: only the records
     * flagged for skipping are removed; the remainder is re-dispatched.
     */
    private Object handleBatchMessages(ProceedingJoinPoint joinPoint, MessageContext context) throws Throwable {
        List<ConsumerRecord<?, ?>> records = context.batchRecords;
        String topic = records.get(0).topic();
        String groupId = KafkaUtils.getConsumerGroupId();

        // Partition the batch into records to process and records to skip.
        List<ConsumerRecord<?, ?>> filteredRecords = new ArrayList<>();
        List<ConsumerRecord<?, ?>> skippedRecords = new ArrayList<>();

        for (ConsumerRecord<?, ?> record : records) {
            final int partition = record.partition();
            final long offset = record.offset();
            final String partitionKey = buildPartitionKey(topic, groupId, partition);

            if (shouldSkipMessage(partitionKey, groupId, topic, partition, offset)) {
                skippedRecords.add(record);
                logSkippedMessage(groupId, topic, partition, offset);
                cleanupIfLastMessage(partitionKey, groupId, topic, partition, offset);
            } else {
                filteredRecords.add(record);
            }
        }

        // Log detailed filtering results when anything was skipped.
        if (!skippedRecords.isEmpty()) {
            logger.warn("[KafkaOffsetSkipper] 批量消息精确过滤: 总数={}, 跳过={}, 处理={} groupId={}, topic={}", 
                       records.size(), skippedRecords.size(), filteredRecords.size(), groupId, topic);

            for (ConsumerRecord<?, ?> skipped : skippedRecords) {
                logger.debug("[KafkaOffsetSkipper] 跳过消息: partition={}, offset={}", 
                           skipped.partition(), skipped.offset());
            }
        }

        // Whole batch skipped: nothing left to invoke.
        if (filteredRecords.isEmpty()) {
            logger.warn("[KafkaOffsetSkipper] 批量中所有消息都被跳过 groupId={}, topic={}, totalSize={}", 
                       groupId, topic, records.size());
            return null;
        }

        // Some records were filtered out: re-dispatch with the reduced list.
        if (!skippedRecords.isEmpty()) {
            return processFilteredBatch(joinPoint, context, filteredRecords, topic, groupId);
        }

        // Nothing to skip: proceed normally.
        try {
            return joinPoint.proceed();
        } catch (Exception e) {
            // On failure, check whether any record in the batch may be skipped.
            return handleBatchException(e, context, topic, groupId);
        }
    }

    /**
     * Invokes the listener with the filtered batch by proceeding through the join
     * point with a modified argument array (the batch list replaced by
     * {@code filteredRecords}; all other arguments, e.g. the Acknowledgment, kept).
     */
    private Object processFilteredBatch(ProceedingJoinPoint joinPoint, MessageContext context, 
                                       List<ConsumerRecord<?, ?>> filteredRecords, String topic, String groupId) throws Throwable {
        try {
            Object[] args = joinPoint.getArgs();
            Object[] newArgs = new Object[args.length];

            for (int i = 0; i < args.length; i++) {
                if (args[i] instanceof List && !((List<?>) args[i]).isEmpty() &&
                    ((List<?>) args[i]).get(0) instanceof ConsumerRecord) {
                    // Swap in the filtered record list.
                    newArgs[i] = filteredRecords;
                } else {
                    // Leave every other argument untouched.
                    newArgs[i] = args[i];
                }
            }

            // Proceed through the join point with the modified arguments.
            // (The previous implementation used getMethod(...).invoke(...), which
            // bypassed the proxy/advice chain, wrapped listener exceptions in
            // InvocationTargetException — defeating the recovery logic below — and
            // depended on a fragile runtime-class-based parameter-type lookup.)
            return joinPoint.proceed(newArgs);

        } catch (Exception e) {
            logger.error("[KafkaOffsetSkipper] 处理过滤后的批量消息失败: {}", e.getMessage(), e);
            // On failure of the filtered batch, check whether it may be skipped.
            return handleFilteredBatchException(e, context, filteredRecords, topic, groupId);
        }
    }

    /**
     * Scans {@code records} and, for each one the handler allows to skip, records
     * its offset in {@link #partitionSkipOffsets} and logs the skip.
     *
     * @return {@code true} if at least one record was flagged for skipping
     */
    private boolean recordSkippableOffsets(List<ConsumerRecord<?, ?>> records, String topic, String groupId) {
        boolean anySkippable = false;
        for (ConsumerRecord<?, ?> record : records) {
            final int partition = record.partition();
            final long offset = record.offset();
            if (handler.shouldSkip(groupId, topic, partition, offset)) {
                anySkippable = true;
                partitionSkipOffsets.put(buildPartitionKey(topic, groupId, partition), offset);
                logSkippedMessage(groupId, topic, partition, offset);
            }
        }
        return anySkippable;
    }

    /**
     * Exception path for a filtered batch: rethrows when nothing may be skipped,
     * otherwise records the skip offsets and acknowledges (when possible).
     */
    private Object handleFilteredBatchException(Exception e, MessageContext context, 
                                               List<ConsumerRecord<?, ?>> filteredRecords, String topic, String groupId) throws Exception {
        if (!recordSkippableOffsets(filteredRecords, topic, groupId)) {
            throw e;
        }

        logger.warn("[KafkaOffsetSkipper] 过滤后的批量消息处理异常，跳过剩余消息 groupId={}, topic={}, remainingSize={}", 
                   groupId, topic, filteredRecords.size());

        // Auto-commit the offset when an Acknowledgment is available.
        if (context.acknowledgment != null) {
            context.acknowledgment.acknowledge();
            logger.warn("[KafkaOffsetSkipper] 已自动提交过滤后批量offset groupId={}, topic={}, processedSize={}",
                       groupId, topic, filteredRecords.size());
        }

        return null;
    }

    /**
     * Builds the per-partition bookkeeping key: topic:groupId:partition.
     */
    private String buildPartitionKey(String topic, String groupId, int partition) {
        return topic + ":" + groupId + ":" + partition;
    }

    /**
     * Returns whether the given record should be skipped, either because the
     * partition has a recorded skip entry or because the handler says so.
     */
    private boolean shouldSkipMessage(String partitionKey, String groupId, String topic, int partition, long offset) {
        // NOTE(review): a recorded entry puts the whole partition into "skip mode"
        // irrespective of the incoming offset; the mode only ends when
        // cleanupIfLastMessage removes the entry. Confirm this is intended rather
        // than comparing offset <= skipOffset.
        Long skipOffset = partitionSkipOffsets.get(partitionKey);
        return skipOffset != null || handler.shouldSkip(groupId, topic, partition, offset);
    }

    /**
     * Logs a skipped message at WARN level.
     */
    private void logSkippedMessage(String groupId, String topic, int partition, long offset) {
        logger.warn("[KafkaOffsetSkipper] 跳过消息 groupId={}, topic={}, partition={}, offset={}",
                groupId, topic, partition, offset);
    }

    /**
     * Clears the partition's skip entry once the handler reports that the last
     * message of the skip range has been skipped.
     */
    private void cleanupIfLastMessage(String partitionKey, String groupId, String topic, int partition, long offset) {
        if (handler.isLastMessageSkipped(groupId, topic, partition, offset)) {
            partitionSkipOffsets.remove(partitionKey);
            logger.debug("[KafkaOffsetSkipper] 清理跳过状态 groupId={}, topic={}, partition={}",
                    groupId, topic, partition);
        }
    }

    /**
     * Exception path for a single record: rethrows when the handler does not allow
     * skipping, otherwise records the skip offset and acknowledges (when possible).
     */
    private Object handleSingleException(Exception e, MessageContext context, String partitionKey,
                                         String groupId, String topic, int partition, long offset) throws Exception {
        if (!handler.shouldSkip(groupId, topic, partition, offset)) {
            throw e;
        }

        // Remember the offset flagged for skipping on this partition.
        partitionSkipOffsets.put(partitionKey, offset);

        logSkippedMessage(groupId, topic, partition, offset);

        // Auto-commit the offset when an Acknowledgment is available.
        if (context.acknowledgment != null) {
            context.acknowledgment.acknowledge();
            logger.warn("[KafkaOffsetSkipper] 已自动提交offset groupId={}, topic={}, partition={}, offset={}",
                    groupId, topic, partition, offset);
        }

        return null;
    }

    /**
     * Exception path for an unfiltered batch: rethrows when nothing may be skipped,
     * otherwise skips the whole batch and acknowledges (when possible).
     */
    private Object handleBatchException(Exception e, MessageContext context, String topic, String groupId) throws Exception {
        List<ConsumerRecord<?, ?>> records = context.batchRecords;

        if (!recordSkippableOffsets(records, topic, groupId)) {
            throw e;
        }

        logger.warn("[KafkaOffsetSkipper] 批量消息处理异常，跳过整个批次 groupId={}, topic={}, batchSize={}",
                groupId, topic, records.size());

        // Auto-commit the offset when an Acknowledgment is available.
        if (context.acknowledgment != null) {
            context.acknowledgment.acknowledge();
            logger.warn("[KafkaOffsetSkipper] 已自动提交批量offset groupId={}, topic={}, batchSize={}",
                    groupId, topic, records.size());
        }

        return null;
    }

    /**
     * Immutable holder for the extracted listener arguments: either a single
     * record or a batch, plus an optional acknowledgment.
     */
    private static class MessageContext {
        final ConsumerRecord<?, ?> singleRecord;
        final List<ConsumerRecord<?, ?>> batchRecords;
        final Acknowledgment acknowledgment;

        MessageContext(ConsumerRecord<?, ?> singleRecord, List<ConsumerRecord<?, ?>> batchRecords, Acknowledgment acknowledgment) {
            this.singleRecord = singleRecord;
            this.batchRecords = batchRecords;
            this.acknowledgment = acknowledgment;
        }

        /**
         * True when neither a single record nor a non-empty batch was found.
         */
        boolean isEmpty() {
            return singleRecord == null && (batchRecords == null || batchRecords.isEmpty());
        }

        /**
         * True when a non-empty batch was found.
         */
        boolean isBatch() {
            return batchRecords != null && !batchRecords.isEmpty();
        }
    }
}
