package com.ln.kafka.v2_4_0.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;


public class ConsumerSimple {

    // Topic consumed by every demo in this class.
    // (Left non-final so existing callers that reassign it keep working.)
    public static String TOPIC_NAME = "topic-3";

    public static void main(String[] args) throws ExecutionException, InterruptedException {
//        helloworld();

//        commitedOffset();

        // Manual offset commit, committing per partition.
        commitedOffsetWithPartition();

        // Manual offset commit with an explicitly assigned partition.
//        commitedOffsetWithPartition2();

    }

    /**
     * Builds the consumer configuration shared by every demo in this class:
     * localhost broker, group "test", String key/value deserializers.
     *
     * @param autoCommit whether the client commits offsets automatically;
     *                   when {@code false} the demos commit manually
     * @return properties for a {@code KafkaConsumer<String, String>}
     */
    private static Properties consumerProps(boolean autoCommit) {
        Properties props = new Properties();
        props.setProperty("bootstrap.servers", "localhost:9092");
        // Consumer group id.
        props.setProperty("group.id", "test");
        props.setProperty("enable.auto.commit", Boolean.toString(autoCommit));
        // Only takes effect when enable.auto.commit=true.
        props.setProperty("auto.commit.interval.ms", "1000");
        props.setProperty("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.setProperty("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        return props;
    }

    /**
     * Synchronously commits the given partition's consumer position.
     * The committed value is the offset of the NEXT record to read,
     * hence {@code lastOffset + 1}.
     *
     * @param consumer   the consumer to commit through
     * @param partition  the partition whose offset is committed
     * @param lastOffset offset of the last record processed in this batch
     */
    private static void commitNextOffset(KafkaConsumer<String, String> consumer,
                                         TopicPartition partition,
                                         long lastOffset) {
        Map<TopicPartition, OffsetAndMetadata> offsets = new HashMap<>();
        offsets.put(partition, new OffsetAndMetadata(lastOffset + 1));
        consumer.commitSync(offsets);
        System.out.println("======= partition - " + partition + " end====");
    }

    /**
     * Prints every record of one partition from a poll result, then commits
     * the offset for that partition alone.
     *
     * @param consumer  the consumer to commit through
     * @param records   the batch returned by {@code poll}
     * @param partition a partition taken from {@code records.partitions()}
     */
    private static void processAndCommitPartition(KafkaConsumer<String, String> consumer,
                                                  ConsumerRecords<String, String> records,
                                                  TopicPartition partition) {
        List<ConsumerRecord<String, String>> pRecord = records.records(partition);
        for (ConsumerRecord<String, String> record : pRecord) {
            System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                    record.partition(), record.offset(), record.key(), record.value());
        }
        // records.partitions() only lists partitions that returned data,
        // so pRecord is non-empty here.
        long lastOffset = pRecord.get(pRecord.size() - 1).offset();
        commitNextOffset(consumer, partition, lastOffset);
    }

    /**
     * Auto-commit consumer loop. Seen in the wild but not recommended:
     * offsets may be committed before the records are actually processed.
     */
    public static void helloworld() {
        Properties props = consumerProps(true);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic.
        consumer.subscribe(Collections.singletonList(TOPIC_NAME));
        while (true) {
            // Poll with a one-second timeout.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n", record.partition(), record.offset(), record.key(), record.value());
            }
        }
    }

    /**
     * Manual offset commit: process the whole batch, then commit once
     * asynchronously. If processing fails, skip the commit so the batch
     * is redelivered.
     */
    public static void commitedOffset() {
        Properties props = consumerProps(false);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic.
        consumer.subscribe(Collections.singletonList(TOPIC_NAME));
        while (true) {
            // Poll with a one-second timeout.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // Business logic goes here.
                System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n", record.partition(), record.offset(), record.key(), record.value());
                // On failure, do NOT commit, so the records are consumed again.
            }
            // On success, commit the offsets asynchronously.
            consumer.commitAsync();
        }
    }


    /**
     * Manual offset commit, handled per partition: each partition's records
     * are processed and its offset committed independently.
     */
    public static void commitedOffsetWithPartition() {
        Properties props = consumerProps(false);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to the topic.
        consumer.subscribe(Collections.singletonList(TOPIC_NAME));
        while (true) {
            // Poll with a one-second timeout.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition of the batch separately.
            records.partitions().forEach(partition ->
                    processAndCommitPartition(consumer, records, partition));
        }
    }

    /**
     * Manual offset commit per partition, with the consumer explicitly
     * assigned to partition 0 of the topic (no group rebalancing).
     */
    public static void commitedOffsetWithPartition2() {
        Properties props = consumerProps(false);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        // Assign this consumer directly to one partition of the topic.
        consumer.assign(Collections.singletonList(p0));

        while (true) {
            // Poll with a one-second timeout.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition of the batch separately.
            records.partitions().forEach(partition ->
                    processAndCommitPartition(consumer, records, partition));
        }
    }


    /**
     * Manually choose the starting offset and commit by hand.
     * Controlling the start position lets a failed run re-consume records.
     *
     * Typical production flow:
     * 1. First run starts from 0.
     * 2. After consuming e.g. 100 records, store offset 101 in Redis.
     * 3. Before each poll, read the latest offset from Redis.
     * 4. Resume consumption from that position.
     */
    public static void controllerOffset() {
        Properties props = consumerProps(false);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
        // Assign this consumer directly to specific partitions of the topic.
        consumer.assign(Arrays.asList(p0, p1));

        // Cap on records consumed from p0 per batch.
        long totalNum = 40;

        // Set the starting offset ONCE, before the poll loop. (Seeking inside
        // the loop would rewind to 700 on every iteration, discarding the
        // committed progress and reprocessing the same records forever.)
        consumer.seek(p0, 700);

        while (true) {
            // Poll with a one-second timeout.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition of the batch separately.
            records.partitions().forEach(partition -> {
                // Records belonging to this partition only.
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                long num = 0;
                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("partition = %d, offset = %d, key = %s, value = %s%n",
                            record.partition(), record.offset(), record.key(), record.value());
                    num++;
                    // Once the cap is reached, pause consumption of p0.
                    if (record.partition() == 0) {
                        if (num >= totalNum) {
                            consumer.pause(Arrays.asList(p0));
                        }
                    }
                    // NOTE(review): resumes p0 after partition 1 delivers 40
                    // records in a batch — intent unclear in the original
                    // ("不太明白?" = "don't quite understand?"); confirm.
                    if (record.partition() == 1) {
                        if (num == 40) {
                            consumer.resume(Arrays.asList(p0));
                        }
                    }
                }

                // records.partitions() only lists partitions that returned
                // data, so pRecord is non-empty here.
                long lastOffset = pRecord.get(pRecord.size() - 1).offset();
                commitNextOffset(consumer, partition, lastOffset);
            });
        }
    }

    /**
     * Flow control / rate limiting.
     * Typical production approach:
     * 1. Usually applied to a single partition of the topic.
     * 2. Use a token-bucket service: take a token before consuming each
     *    message; on success run the business logic, otherwise pause
     *    consumption until a token becomes available.
     */
    public static void controllerLimit() {
        Properties props = consumerProps(false);

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        // Assign this consumer directly to one partition of the topic.
        consumer.assign(Collections.singletonList(p0));

        // Set the starting offset ONCE, before the poll loop. (Seeking inside
        // the loop would rewind to 700 on every iteration, discarding the
        // committed progress and reprocessing the same records forever.)
        consumer.seek(p0, 700);

        while (true) {
            // Poll with a one-second timeout.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition of the batch separately.
            records.partitions().forEach(partition ->
                    processAndCommitPartition(consumer, records, partition));
        }
    }
}
