package com.lhc.kafkademo.consumer;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

import java.time.Duration;
import java.util.*;

/**
 * Demonstrates several Kafka consumption patterns against a single topic:
 * auto-commit, manual async commit, per-partition sync commit, manual
 * partition assignment, and manually controlled start offsets.
 *
 * <p>Each demo method runs an infinite poll loop; enable exactly one of them
 * from {@link #main(String[])}.
 */
public class ConsumerSample {

    private static final String TOPIC_NAME = "jz_topic";
    private static final String BOOTSTRAP_SERVERS = "192.168.11.130:9092";
    private static final String GROUP_ID = "test";
    private static final String STRING_DESERIALIZER =
            "org.apache.kafka.common.serialization.StringDeserializer";
    private static final Duration POLL_TIMEOUT = Duration.ofMillis(10000);

    public static void main(String[] args) {
        // Demo entry point: enable exactly one of the consumption patterns below.
        // helloworld();
        // committedOffset();
        // committedOffsetWithPartition();
        // committedOffsetWithPartition2();
        controlOffset();
    }

    /**
     * Builds the consumer configuration shared by all demos.
     *
     * @param autoCommit whether the client commits offsets automatically
     * @return the consumer {@link Properties}
     */
    private static Properties buildProperties(boolean autoCommit) {
        Properties properties = new Properties();
        // Required settings.
        properties.put("bootstrap.servers", BOOTSTRAP_SERVERS);
        properties.put("key.deserializer", STRING_DESERIALIZER);
        properties.put("value.deserializer", STRING_DESERIALIZER);
        properties.put("group.id", GROUP_ID);
        // Optional settings.
        properties.put("enable.auto.commit", String.valueOf(autoCommit));
        // Auto-commit interval (1s); ignored when enable.auto.commit is false.
        properties.put("auto.commit.interval.ms", "1000");
        return properties;
    }

    /**
     * Simplest consumer: subscribe to the topic and rely on auto-committed offsets.
     */
    private static void helloworld() {
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(buildProperties(true));
        // Subscribe to one (or several) topics.
        kafkaConsumer.subscribe(Collections.singletonList(TOPIC_NAME));
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(POLL_TIMEOUT);
            for (ConsumerRecord<String, String> record : records) {
                printRecord(record);
            }
        }
    }

    /**
     * Manual offset commit: process a batch, then commit asynchronously.
     * If processing fails, skipping the commit causes the batch to be re-consumed.
     */
    private static void committedOffset() {
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(buildProperties(false));
        kafkaConsumer.subscribe(Collections.singletonList(TOPIC_NAME));
        while (true) {
            ConsumerRecords<String, String> records = kafkaConsumer.poll(POLL_TIMEOUT);
            for (ConsumerRecord<String, String> record : records) {
                // TODO save 2 db — persist the record; on failure, roll back and do not commit.
                printRecord(record);
            }
            // On success, manually commit the offsets of the records just processed.
            kafkaConsumer.commitAsync();
        }
    }

    /**
     * Manual offset commit, with offsets committed per partition.
     */
    private static void committedOffsetWithPartition() {
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(buildProperties(false));
        kafkaConsumer.subscribe(Collections.singletonList(TOPIC_NAME));
        while (true) {
            processAndCommitByPartition(kafkaConsumer, kafkaConsumer.poll(POLL_TIMEOUT));
        }
    }

    /**
     * Manual partition assignment (topic partition 0 only, no group rebalancing)
     * combined with per-partition commits.
     */
    private static void committedOffsetWithPartition2() {
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(buildProperties(false));
        // Explicitly assign a single partition of the topic.
        TopicPartition partition0 = new TopicPartition(TOPIC_NAME, 0);
        kafkaConsumer.assign(Collections.singleton(partition0));
        while (true) {
            processAndCommitByPartition(kafkaConsumer, kafkaConsumer.poll(POLL_TIMEOUT));
        }
    }

    /**
     * Manually controls the consumption start offset and commits manually.
     *
     * <p>Intended workflow:
     * <ol>
     *   <li>First run starts from offset 0 (the usual case).</li>
     *   <li>After consuming e.g. 100 records, store offset 101 externally (e.g. redis).</li>
     *   <li>Before each poll, load the latest stored offset and seek to it.</li>
     * </ol>
     * The start offset is hard-coded to 60 here purely for demonstration.
     */
    private static void controlOffset() {
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(buildProperties(false));
        // Explicitly assign a single partition of the topic.
        TopicPartition partition0 = new TopicPartition(TOPIC_NAME, 0);
        kafkaConsumer.assign(Collections.singleton(partition0));
        while (true) {
            // Manually set the start offset; in a real system this value would come
            // from external storage, enabling replay after a processing error.
            kafkaConsumer.seek(partition0, 60);
            processAndCommitByPartition(kafkaConsumer, kafkaConsumer.poll(POLL_TIMEOUT));
        }
    }

    /** Prints one consumed record (partition, offset, key, value). */
    private static void printRecord(ConsumerRecord<String, String> record) {
        System.out.printf("partition - %d, offset - %d, key - %s, value - %s%n",
                record.partition(), record.offset(), record.key(), record.value());
    }

    /**
     * Processes a polled batch partition by partition, then synchronously commits
     * each partition's next-to-read offset (last consumed offset + 1).
     *
     * @param consumer the consumer to commit through
     * @param records  the batch returned by {@code poll}
     */
    private static void processAndCommitByPartition(KafkaConsumer<String, String> consumer,
                                                    ConsumerRecords<String, String> records) {
        for (TopicPartition partition : records.partitions()) {
            List<ConsumerRecord<String, String>> pRecords = records.records(partition);
            for (ConsumerRecord<String, String> record : pRecords) {
                printRecord(record);
            }
            // Commit position is the offset of the NEXT record to read.
            long lastOffset = pRecords.get(pRecords.size() - 1).offset();
            Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
            offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
            consumer.commitSync(offset);
            System.out.println("=======================partition - "+ partition +"==========================");
        }
    }
}
