package com.ly.kafka.utils;

import com.common.redis.service.RedisService;
import com.ly.kafka.empty.Order;
import com.ly.kafka.empty.OrderDTO;
import com.ly.kafka.pool.KafkaConsumerPool;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.annotation.TopicPartition;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;
import org.springframework.transaction.annotation.Transactional;

import java.util.List;
import java.util.Optional;

@Component
@Slf4j
public class ConsumerUtils {
    // NOTE(review): the class previously declared a manual
    // LoggerFactory.getLogger(...) field alongside @Slf4j, which duplicates the
    // Lombok-generated `log`. The manual field was removed; Lombok's `log` is
    // the single logger now.

    // NOTE(review): not annotated with @Autowired and only referenced from the
    // (removed) commented-out redis dedup snippets — left unwired pending the
    // idempotent-consume work; confirm a RedisService bean exists before wiring.
    private RedisService redisService;

    // Worker pool that fans records out to per-order queues (used by topicTest7)
    // so records sharing an order id are processed sequentially by one thread.
    @Autowired
    private KafkaConsumerPool orderConsumerPool;

    // NOTE(review): a commented-out batchFactory bean
    // (ConcurrentKafkaListenerContainerFactory, concurrency=5, batch listener,
    // AckMode.MANUAL_IMMEDIATE) used to live here; it belongs in a
    // @Configuration class if batch consumption config is needed again.

    /**
     * Single-record consumption with a manual offset commit.
     * The @KafkaListener wiring for this method is currently disabled
     * (it previously listened on order-topic-test, partitions 0-8,
     * groupId testGroup3); kept as a reference implementation.
     *
     * @param record the consumed record; its value may be null
     * @param ack    manual acknowledgment handle — invoked only when a value
     *               is present, so empty records are never committed here
     */
    public void topicTest(ConsumerRecord<String, String> record, Acknowledgment ack) {
        Optional.ofNullable(record.value()).ifPresent(msg -> {
            log.info("topic_test 消费了： Topic:{},key:{},Message:{}",
                    record.topic(), record.key(), msg);
            // manual offset commit, only after the message was handled
            ack.acknowledge();
        });
    }

    /**
     * Message-forwarding example: a record received from one topic would be
     * processed and forwarded via @SendTo to another. The listener/@SendTo
     * wiring is currently disabled; only the logging body remains.
     *
     * @param record the consumed record
     */
    public void onMessage2(ConsumerRecord<?, ?> record) {
        log.info("topic:{}|partition:{}|offset:{}|value:{}",
                record.topic(), record.partition(), record.offset(), record.value());
    }

    /**
     * Shared routine for the batch listeners below: logs every non-null record
     * value in the batch, then commits the offset manually once the whole
     * batch succeeded. On failure the exception is logged and rethrown so the
     * caller's @Transactional boundary rolls back and the offset is NOT
     * committed (the batch will be redelivered).
     *
     * NOTE(review): the original code sketched a redis-based duplicate check
     * (redisTemplate.hasKey(topic + partition + key), then set after handling)
     * behind a dead `if (true)`; until that lands, every record is processed
     * unconditionally — consumers must tolerate redelivery.
     *
     * @param label   tag included in each log line to identify the listener
     * @param records batch of records from one poll
     * @param ack     manual acknowledgment handle
     */
    private void consumeBatch(String label, List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        try {
            for (ConsumerRecord<String, String> record : records) {
                Optional.ofNullable(record.value()).ifPresent(msg ->
                        log.info("{} 消费了： Topic:{},key:{},Message:{}",
                                label, record.topic(), record.key(), msg));
            }
            // manual offset commit for the whole batch
            ack.acknowledge();
        } catch (Exception e) {
            log.error("batch consume failed, offset not committed", e);
            throw e;
        }
    }

    /**
     * Batch consumption of hello-kraft1 with manual ack (see consumeBatch).
     * Requires a batch-enabled container factory (the batchFactory bean
     * referenced in the class-level note).
     *
     * @param records batch of records from one poll
     * @param ack     manual acknowledgment handle
     */
    @Transactional(rollbackFor = Exception.class)
    @KafkaListener(id="topicTest2",topics = "hello-kraft1", topicPartitions = {
            @TopicPartition(topic ="hello-kraft1", partitions = {"0" ,"1" ,"2","3","4","5","6","7","8"}),
    }, groupId = "testGroup1")
    public void topicTest2(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        consumeBatch("hello-kraft1", records, ack);
    }

    /**
     * Batch consumption of order-topic-test-2 through a custom filtering
     * container factory (filterContainerFactory — record interceptor/filter).
     *
     * @param records batch of records from one poll
     * @param ack     manual acknowledgment handle
     */
    @Transactional(rollbackFor = Exception.class)
    @KafkaListener(id="topicTest3",topics = "order-topic-test-2", topicPartitions = {
            @TopicPartition(topic ="order-topic-test-2", partitions = {"0"}),
    }, groupId = "testGroup1",containerFactory="filterContainerFactory")
    public void topicTest3(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        consumeBatch("order-topic-test-2", records, ack);
    }

    /**
     * Batch consumption of hello-kraft3 through delayContainerFactory: the
     * factory keeps the listener from auto-starting on boot so a scheduler can
     * start it on its own timetable (Spring would otherwise start it
     * immediately and defeat the timed start).
     *
     * @param records batch of records from one poll
     * @param ack     manual acknowledgment handle
     */
    @Transactional(rollbackFor = Exception.class)
    @KafkaListener(id="timingConsumer",topics = "hello-kraft3", topicPartitions = {
            @TopicPartition(topic ="hello-kraft3", partitions = {"0" ,"1" ,"2","3","4","5","6","7","8"}),
    }, groupId = "testGroup1",containerFactory = "delayContainerFactory")
    public void topicTest4(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        consumeBatch("hello-kraft3", records, ack);
    }

    /**
     * Batch consumption of hello-kraft with manual ack (see consumeBatch).
     * NOTE(review): the original log label was "hello-kraft5" although the
     * topic is "hello-kraft"; the label is kept for log continuity.
     *
     * @param records batch of records from one poll
     * @param ack     manual acknowledgment handle
     * @throws InterruptedException kept for signature compatibility
     */
    @Transactional(rollbackFor = Exception.class)
    @KafkaListener(id="topicTest5",topics = "hello-kraft", topicPartitions = {
            @TopicPartition(topic ="hello-kraft", partitions = {"0" ,"1" ,"2","3","4","5","6","7","8"}),
    }, groupId = "testGroup1")
    public void topicTest5(List<ConsumerRecord<String, String>> records, Acknowledgment ack) throws InterruptedException {
        consumeBatch("hello-kraft5", records, ack);
    }

    /**
     * Second consumer group (testGroup2) on hello-kraft — same batch handling
     * as topicTest5 but deliberately NOT transactional, matching the original.
     * NOTE(review): label "hello-kraft6" kept from the original log text.
     *
     * @param records batch of records from one poll
     * @param ack     manual acknowledgment handle
     * @throws InterruptedException kept for signature compatibility
     */
    @KafkaListener(id="topicTest6",topics = "hello-kraft", topicPartitions = {
            @TopicPartition(topic ="hello-kraft", partitions = {"0" ,"1" ,"2","3","4","5","6","7","8"}),
    }, groupId = "testGroup2")
    public void topicTest6(List<ConsumerRecord<String, String>> records, Acknowledgment ack) throws InterruptedException {
        consumeBatch("hello-kraft6", records, ack);
    }

    /**
     * Ordered multi-threaded consumption: one consumer fans records out to
     * multiple work queues. Records sharing an order id are routed to the same
     * queue and handled by a single thread, preserving per-order ordering.
     * The offset is committed only after the pool reports every record of this
     * poll as processed.
     *
     * NOTE(review): this waits indefinitely for the pool counter to reach the
     * batch size — a single stuck task blocks the commit forever. The original
     * comment already warned that a timeout/circuit-breaker is required; that
     * is still outstanding. The previous hot `while(true)` spin now sleeps
     * briefly between checks instead of burning a full core.
     *
     * @param records batch of records from one poll
     * @param ack     manual acknowledgment handle
     * @throws InterruptedException if interrupted while waiting for the pool
     */
    @KafkaListener(id="topicTest7",topics = "baiyan-test1", topicPartitions = {
            @TopicPartition(topic ="baiyan-test1", partitions = {"0" ,"1" ,"2","3","4","5","6","7","8"}),
    }, groupId = "testGroup2")
    public void topicTest7(List<ConsumerRecord<String, String>> records, Acknowledgment ack) throws InterruptedException {
        try {
            if (records.isEmpty()) {
                return;
            }
            // Route each record to the pool keyed by order id so that records
            // of the same order land in the same queue (ordering guarantee).
            records.forEach(consumerRecord -> {
                // value() is already a String; the redundant toString() was dropped
                OrderDTO order = GsonUtil.gsonToBean(consumerRecord.value(), OrderDTO.class);
                orderConsumerPool.submitTask(order.getId(), order);
            });

            // Commit once the pool's completed-task counter matches the poll size.
            while (records.size() != orderConsumerPool.getPendingOffsets().get()) {
                Thread.sleep(1); // back off instead of hot-spinning
            }
            ack.acknowledge();
            log.info("offset提交：{}", records.get(records.size() - 1).offset());
            // reset the counter for the next poll
            orderConsumerPool.getPendingOffsets().set(0L);
        } catch (Exception e) {
            log.error("ordered consume failed, offset not committed", e);
            throw e;
        }
    }

}
