package com.audaque.springboot.foshanupload.web.kafkademo.controller;

import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.springframework.web.bind.annotation.GetMapping;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.time.Duration;
import java.util.*;

/**
 * @author zgb
 * @desc ...
 * @date 2023-05-23 20:30:28
 */

@Slf4j
@RestController
@RequestMapping("/kafka/consumer")
public class ConsumerBatchController {

    /**
     * Builds a manually managed {@link KafkaConsumer} pointed at localhost:9092.
     * <p>
     * Auto-commit is disabled (offsets are committed explicitly by the callers) and
     * {@code auto.offset.reset=earliest}: partitions with a committed offset resume
     * from it; partitions without one are read from the beginning. (The other legal
     * values are {@code latest} — start from new data — and {@code none} — throw
     * when no committed offset exists.)
     *
     * @param readNum optional cap on the number of records a single {@code poll()}
     *                may return ({@code max.poll.records}); {@code null} keeps the
     *                client default
     * @return a new consumer; the caller is responsible for closing it
     */
    private KafkaConsumer<String, String> getKafkaConsumer(Integer readNum) {
        Properties props = new Properties();
        // Kafka broker bootstrap address.
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
        // NOTE: the original version also set acks/retries/batch.size/linger.ms/
        // buffer.memory here — those are PRODUCER configs, ignored by the consumer
        // and reported as unknown-config warnings, so they have been removed.
        if (readNum != null) {
            // Upper bound on records returned by one poll() call.
            props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, readNum);
        }
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Offsets are committed manually by the endpoints below.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Consumer group name.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "mykafka1");
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        return new KafkaConsumer<>(props);
    }

    /**
     * Consumes via {@code subscribe()} (group-managed assignment with automatic
     * partition rebalancing — unlike {@code assign()}), after first committing
     * {@code offsetStart} for partition 0 so the group resumes from that position.
     *
     * @param offsetStart offset committed for partition 0 before subscribing
     * @param topicName   topic to subscribe to
     * @param readNum     optional {@code max.poll.records} cap
     * @throws InterruptedException kept for signature compatibility
     */
    @GetMapping("/subscribe")
    public void subscribe(Long offsetStart, String topicName, Integer readNum) throws InterruptedException {
        // try-with-resources: the original leaked one consumer (and its sockets /
        // group membership) per HTTP request.
        try (KafkaConsumer<String, String> consumer = getKafkaConsumer(readNum)) {
            // Pre-commit the desired starting offset for partition 0; after the
            // subscribe/rebalance the group resumes from this committed position.
            Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
            offsets.put(new TopicPartition(topicName, 0), new OffsetAndMetadata(offsetStart));
            consumer.commitSync(offsets);

            consumer.subscribe(Collections.singletonList(topicName));

            // poll(Duration) replaces the deprecated poll(long).
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                log.info("{}", record);
            }
        }
    }

    /**
     * Consumes via {@code assign()} (explicit, static partition assignment — no
     * rebalancing), seeking every partition of the topic to {@code offsetStart}
     * and draining records until a poll returns empty.
     *
     * @param offsetStart starting offset applied to every partition
     * @param topicName   topic whose partitions are assigned
     * @param readNum     optional {@code max.poll.records} cap
     * @throws InterruptedException kept for signature compatibility
     */
    @GetMapping("/assign")
    public void assign(Long offsetStart, String topicName, Integer readNum) throws InterruptedException {
        try (KafkaConsumer<String, String> consumer = getKafkaConsumer(readNum)) {
            // Collect every partition of the topic.
            List<TopicPartition> partitions = new ArrayList<>();
            for (PartitionInfo info : consumer.partitionsFor(topicName)) {
                partitions.add(new TopicPartition(topicName, info.partition()));
            }

            // BUG FIX: assign() REPLACES the current assignment, so it must be
            // called once with the full list — the original called it inside the
            // loop, leaving only the last partition assigned.
            consumer.assign(partitions);

            // BUG FIX: seek each assigned partition; the original always sought
            // partition 0, which throws IllegalStateException once partition 0 is
            // no longer part of the (replaced) assignment.
            for (TopicPartition tp : partitions) {
                consumer.seek(tp, offsetStart);
            }

            // BUG FIX: stop when a poll comes back empty — the original
            // while(true) never returned and pinned the HTTP request thread.
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                if (records.isEmpty()) {
                    break;
                }
                for (ConsumerRecord<String, String> record : records) {
                    log.info("{}", record);
                }
            }
        }
    }
}
