//package com.hainiu.cat.web.kafka.consumer;
//
//import org.apache.kafka.clients.consumer.ConsumerRecord;
//import org.springframework.kafka.annotation.KafkaListener;
//import org.springframework.kafka.annotation.PartitionOffset;
//import org.springframework.kafka.annotation.TopicPartition;
//import org.springframework.stereotype.Component;
//
//import java.util.List;
//
///**
// * Created by biji.zhao on 2021/6/29
// *
// * NOTE(review): this entire class (and the whole file) is commented out — it is
// * dead code that compiles to nothing. If these @KafkaListener examples are no
// * longer needed, delete the file and rely on version-control history; if they
// * are reference material, move them to project documentation instead.
// */
//
//@Component
//public class KafkaConsumer {
//
//
////
////    // 消息过滤监听
////    @KafkaListener(topics = {"topic_filter"}, containerFactory = "filterContainerFactory", groupId = "group_Id_1")
////    public void onMessage6Group1(ConsumerRecord<?, ?> record) {
////        System.out.println(record.value()+ "我是分组1");
////    }
////
////    // 消息过滤监听
////    @KafkaListener(topics = {"topic_filter"}, containerFactory = "filterContainerFactory", groupId = "group_Id_2")
////    public void onMessage6Group2(ConsumerRecord<?, ?> record) {
////        System.out.println(record.value() + "我是分组2");
////    }
////
////
////    // 消费监听
////    @KafkaListener(topics = {"topic1"})
////    public void onMessage1(ConsumerRecord<?, ?> record){
////        // 消费的哪个topic、partition的消息,打印出消息内容
////        System.out.println("简单消费："+record.topic()+"-"+record.partition()+"-"+record.value());
////    }
////
////
////    /**
////     * 指定topic、partition、offset消费
////     * 同时监听topic1和topic2，监听topic1的0号分区、topic2的 "0号和1号" 分区，指向1号分区的offset初始值为8
////     * [record]
////     **/
////    @KafkaListener(id = "consumer1",groupId = "felix-group",topicPartitions = {
////            // topic主题  partitions分区    topic2的1号分区的offset初始值为8
////            @TopicPartition(topic = "topic1", partitions = { "0" }),
////            @TopicPartition(topic = "topic2", partitions = "0", partitionOffsets = @PartitionOffset(partition = "1", initialOffset = "8"))
////    })
////    public void onMessage2(ConsumerRecord<?, ?> record) {
////        System.out.println("topic:"+record.topic()+"|partition:"+record.partition()+"|offset:"+record.offset()+"|value:"+record.value());
////    }
////
////
////    /**
////     * 批量消费
////     *          # 设置批量消费
////     *          spring.kafka.listener.type=batch
////     *          # 批量消费每次最多消费多少条消息
////     *          spring.kafka.consumer.max-poll-records=50
////     * @param records
////     */
////    @KafkaListener(id = "consumer2",groupId = "felix-group", topics = "topic1")
////    public void onMessage3(List<ConsumerRecord<?, ?>> records) {
////        System.out.println(">>>批量消费一次，records.size()="+records.size());
////        for (ConsumerRecord<?, ?> record : records) {
////            System.out.println(record.value());
////        }
////    }
////
////    // 将这个异常处理器的BeanName放到@KafkaListener注解的errorHandler属性里面
////    // consumerAwareErrorHandler 处理监听的异常
////    @KafkaListener(topics = {"topic_callback"}, errorHandler = "consumerAwareErrorHandler")
////    public void onMessage4(ConsumerRecord<?, ?> record) throws Exception {
////        throw new Exception("简单消费-模拟异常");
////    }
////
////    // 批量消费也一样，异常处理器的message.getPayload()也可以拿到各条消息的信息
////    @KafkaListener(topics = "topic1",errorHandler="consumerAwareErrorHandler")
////    public void onMessage5(List<ConsumerRecord<?, ?>> records) throws Exception {
////        System.out.println("批量消费一次...");
////        throw new Exception("批量消费-模拟异常");
////    }
////
////
////    // 监听器
////    @KafkaListener(id="timingConsumer", topics = "topic1",containerFactory = "delayContainerFactory")
////    public void onMessageTiming(ConsumerRecord<?, ?> record){
////        System.out.println("消费成功："+record.topic()+"-"+record.partition()+"-"+record.value());
////    }
//}
