package com.china.produce.config.kafka;

import com.alibaba.fastjson.JSON;
import com.alibaba.otter.canal.protocol.FlatMessage;
import com.china.produce.pojo.order.OrderDTO;
import com.china.produce.service.OrderService;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.listener.AbstractConsumerSeekAware;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import javax.annotation.PostConstruct;
import java.util.List;

/**
 * Kafka listener for order messages: pulls records in batches, shards them by
 * order id into a worker pool that preserves per-order processing order, and
 * commits the batch offset only after every record in the batch has been handled.
 *
 * @author niujie
 * @since 2023/12/2
 */
@Component
@Slf4j
public class OrderKafkaListener extends AbstractConsumerSeekAware {

    @Autowired
    private OrderService orderService;

    /**
     * Concurrency level for ordered consumption: the number of queues the
     * pool shards messages across (per-key order is kept within a queue).
     */
    private static final Integer CONCURRENT = 3;

    // NOTE(review): declared raw — if KafkaConsumerPool is generic (like
    // KafkaSortConsumerConfig), prefer KafkaConsumerPool<OrderDTO> here.
    private KafkaConsumerPool kafkaConsumerPool;

    /**
     * Builds the ordered-consumer pool once dependencies are injected.
     */
    @PostConstruct
    public void init() {
        KafkaSortConsumerConfig<OrderDTO> config = new KafkaSortConsumerConfig<>();
        config.setBizName("order");
        config.setConcurrentSize(CONCURRENT);
        // Each message is handed to the order service for processing/retry.
        config.setBizService(orderService::solveRetry);
        kafkaConsumerPool = new KafkaConsumerPool(config);
    }

    /**
     * Batch pull, ordered consumption. Each record is routed to a pool queue
     * keyed by order id (id modulo queue count), and the batch offset is
     * acknowledged only once the pool reports that as many tasks as were
     * submitted have completed.
     *
     * <p>If any task blocks for a long time, the offset commit is delayed
     * until the whole batch finishes.
     *
     * @param records batch of records pulled from the topic
     * @param ack     manual acknowledgment used to commit the batch offset
     */
    @KafkaListener(topics = "test.kafka.ack3", containerFactory = "batchFactory")
    public void consumeMessage(List<ConsumerRecord<?, ?>> records, Acknowledgment ack) {
        if (records.isEmpty()) {
            return;
        }
        // Shard by order id so messages for the same order stay in order.
        records.forEach(consumerRecord -> {
            OrderDTO order = JSON.parseObject(consumerRecord.value().toString(), OrderDTO.class);
            kafkaConsumerPool.submitTask(order.getId(), order);
        });
        log.info("records size {}", records.size());
        // Wait until the pool has completed every task of this batch, then
        // commit the offset. Sleep briefly between checks instead of
        // hot-spinning a CPU core on the consumer thread.
        while (records.size() != kafkaConsumerPool.getPendingOffsets().get()) {
            try {
                Thread.sleep(5);
            } catch (InterruptedException e) {
                // Restore the interrupt flag and bail out WITHOUT acking; the
                // batch will be redelivered (processing is expected to be
                // idempotent — TODO confirm with OrderService.solveRetry).
                Thread.currentThread().interrupt();
                log.warn("interrupted while waiting for batch completion", e);
                return;
            }
        }
        ack.acknowledge();
        log.info("offset 提交 {}", records.get(records.size() - 1).offset());
        kafkaConsumerPool.getPendingOffsets().set(0L);
    }
}
