package com.example.bigdata.kafka.consumer;
import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.text.DateFormat;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.util.*;

/**
 * Demonstrates the main ways of consuming from Kafka and managing offsets:
 * auto-subscribe, manual commit (sync/async), per-partition commit, manual
 * partition assignment, seeking by offset, and seeking by timestamp.
 *
 * <p>Each demo is a standalone static method; uncomment the one you want in
 * {@link #main(String[])}. Broker addresses and the topic name are hard-coded
 * (this is sample code, not production code).
 */
public class ConsumerSample {

    /** Topic every demo in this class consumes from. */
    public final static String TOPIC_NAME = "hrsjw1_topic";

    /**
     * Entry point — uncomment exactly one demo to run it.
     */
    public static void main(String[] args) {
        helloworld();
        // Manual offset commit
//        commitedOffset();
        // Manual commit, handled per partition
//        commitedOffsetWithPartition();
        // Manually assign specific partition(s), then commit offsets
//        commitedOffsetWithPartition2();
        // Manually choose the starting offset, then commit manually
//        controlOffset();
//        KafkaConsumerTest(340);
//        OffsetForTimes();
//        TimestampConsumer();

        // Backfill by offset: re-consume only the record at offset + 1
//        KafkaConsumerReCall(TOPIC_NAME,0,343);
    }

    /**
     * Seeks every partition of {@link #TOPIC_NAME} to the earliest offset whose
     * record timestamp is &gt;= (now - 15 minutes), then consumes from there
     * forever, printing partition/offset for each record.
     */
    public static void TimestampConsumer() {
        Properties prop = KafkaConsumerProperties();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(prop);
        try {
            // Discover all partitions of the topic.
            List<PartitionInfo> partitionInfos = consumer.partitionsFor(TOPIC_NAME);
            List<TopicPartition> topicPartitions = new ArrayList<>();

            Map<TopicPartition, Long> timestampsToSearch = new HashMap<>();
            DateFormat df = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss");
            Date now = new Date();
            long nowTime = now.getTime();
            // Timestamp 15 minutes ago (the original comment claimed 30 minutes;
            // the arithmetic is 15 — comment corrected to match the code).
            long fetchDataTime = nowTime - 1000L * 60 * 15;
//            long fetchDataTime = 1604992740270L;  // fixed timestamp, for reproducible runs
            System.out.println("当前时间: " + df.format(now) +"\tfetchDataTime : "+fetchDataTime);
            for (PartitionInfo partitionInfo : partitionInfos) {
                TopicPartition tp = new TopicPartition(partitionInfo.topic(), partitionInfo.partition());
                topicPartitions.add(tp);
                timestampsToSearch.put(tp, fetchDataTime);
            }
            consumer.assign(topicPartitions);
            // For each partition, look up the first offset at/after the timestamp.
            Map<TopicPartition, OffsetAndTimestamp> map = consumer.offsetsForTimes(timestampsToSearch);
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : map.entrySet()) {
                if (entry.getValue() != null) {
                    System.out.println(
                            "topic : "+ entry.getKey().topic()+
                            "\tPartition : " + entry.getKey().partition()+
                            "\toffset : "+ entry.getValue().offset() +
                            "\tTimestamp : "+ entry.getValue().timestamp()
                    );
                }
            }
            System.out.println("开始设置各分区初始偏移量...");
            for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : map.entrySet()) {
                // offsetsForTimes returns null for a partition when the requested
                // timestamp is newer than that partition's latest record.
                OffsetAndTimestamp offsetTimestamp = entry.getValue();
                if (offsetTimestamp != null) {
                    int partition = entry.getKey().partition();
                    long timestamp = offsetTimestamp.timestamp();
                    long offset = offsetTimestamp.offset();
                    System.out.println("partition = " + partition +
                            ", time = " + df.format(new Date(timestamp))+
                            ", offset = " + offset);
                    // Position the consumer at the resolved offset.
                    consumer.seek(entry.getKey(), offset);
                }
            }
            System.out.println("设置各分区初始偏移量结束...");

            while (true) {
                // Duration overload: the poll(long) form is deprecated.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println("partition = " + record.partition() + ", offset = " + record.offset());
                }
            }
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            consumer.close();
        }
    }

    /**
     * Backfill by timestamp: seeks partitions 0 and 1 to the first offset at or
     * after (now - 24h). Note this method only positions the consumer — it
     * returns without polling any records.
     */
    private static void OffsetForTimes() {
        Properties prop = KafkaConsumerProperties();
        // try-with-resources: the original leaked the consumer on return.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(prop)) {
            TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
            TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
            consumer.assign(Arrays.asList(p0, p1));
            Set<TopicPartition> assignment = consumer.assignment();
            HashMap<TopicPartition, Long> timestampToSearch = new HashMap<>();
            long consumeTimestamp = System.currentTimeMillis() - 1L * 24 * 3600 * 1000;
            System.out.println("ConsuTimestamp:" + consumeTimestamp);
            for (TopicPartition tp : assignment) {
                timestampToSearch.put(tp, consumeTimestamp);
            }

            Map<TopicPartition, OffsetAndTimestamp> offsets = consumer.offsetsForTimes(timestampToSearch);
            for (TopicPartition tp : assignment) {
                // null when the timestamp is newer than the partition's last record.
                OffsetAndTimestamp offsetAndTimestamp = offsets.get(tp);
                if (offsetAndTimestamp != null) {
                    consumer.seek(tp, offsetAndTimestamp.offset());
                }
            }
        }
    }

    /**
     * Backfill by offset: re-reads and prints only the record at
     * {@code startoffset + 1} in the given partition.
     *
     * @param topic_name  topic to read from
     * @param partition   partition index
     * @param startoffset the record at startoffset + 1 is printed
     */
    private static void KafkaConsumerReCall(String topic_name, int partition, int startoffset) {
        Properties props = KafkaConsumerProperties();
        // NOTE(review): the original called KafkaConsumerProperties() a second
        // time and discarded the result — removed.
        // try-with-resources: the original leaked the consumer on return.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            TopicPartition topicPartition = new TopicPartition(topic_name, partition);
            consumer.assign(Arrays.asList(topicPartition));
            consumer.seek(topicPartition, startoffset + 1);
            // A single 1s poll — may come back empty if the fetch is slow; this
            // is demo code, so no retry loop.
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            List<ConsumerRecord<String, String>> record = records.records(topicPartition);
            long wantedOffset = startoffset + 1L;
            for (ConsumerRecord<String, String> line : record) {
                if (wantedOffset == line.offset()) {
                    System.out.printf("topic : %s  partition : %d  offset : %d key : %s value : %s timestamp : %d %n",
                            line.topic(), line.partition(), line.offset(), line.key(), line.value(), line.timestamp());
                }
            }
        }
    }

    /**
     * Consumes one batch from partition 0 starting at {@code startOffset + 1}
     * and prints each record, grouped by partition.
     *
     * @param startOffset consumption begins at startOffset + 1
     */
    private static void KafkaConsumerTest(long startOffset) {
        Properties props = KafkaConsumerProperties();
        // try-with-resources: the original leaked the consumer on return.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            // hrsjw1_topic has two partitions: 0 and 1.
            TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
            TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);  // kept for the commented-out variant below
            // Manually assign a single partition of the topic.
            consumer.assign(Arrays.asList(p0));
//        consumer.assign(Arrays.asList(p0,p1));
            consumer.seek(p0, startOffset + 1);
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition's records separately.
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);
                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("offset = %d, patition = %d ,key = %s, value = %s%n",
                            record.offset(), record.partition(), record.key(), record.value());
                }
                System.out.println("==========partition - "+partition +" ==========");
            }
        }
    }

    /**
     * Builds the shared consumer configuration: broker list, group id,
     * auto-commit disabled (all demos commit manually), and String
     * deserializers for key and value.
     *
     * @return a fresh {@link Properties} ready to construct a consumer
     */
    public static Properties KafkaConsumerProperties() {
        Properties properties = new Properties();
        properties.setProperty(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "172.23.6.159:9092,172.23.6.160:9092,172.23.6.161:9092");
        properties.setProperty(ConsumerConfig.GROUP_ID_CONFIG, "test");
        properties.setProperty(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Inert while auto-commit is disabled; kept so the auto-commit demos
        // can be re-enabled by flipping the flag above.
        properties.setProperty(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // Standard way to configure (de)serializers: by class name.
        properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        return properties;
    }

    /**
     * Manually chooses the starting offset on every poll and commits manually,
     * per partition. The fixed offset (250) stands in for a value that would
     * normally be loaded from external storage such as Redis.
     */
    private static void controlOffset() {
        Properties props = KafkaConsumerProperties();

        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // hrsjw1_topic has two partitions: 0 and 1.
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);
        TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
//        consumer.assign(Arrays.asList(p1));
        consumer.assign(Arrays.asList(p0, p1));
//        consumer.subscribe(Arrays.asList("first"));
        while (true) {
            // Manually set the starting offset.
            /*
             * 1. We control the starting offset ourselves.
             * 2. On a program failure, at most one batch is re-consumed.
             *
             * Typical flow:
             *  1. First run consumes from 0.
             *  2. After consuming e.g. 100 records, store offset 101 in Redis.
             *  3. Before each poll, fetch the latest offset from Redis.
             *  4. Resume consuming from that position.
             */
            long startOffset = 250L;
            consumer.seek(p1, startOffset);
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition's records separately.
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);

                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("offset = %d, patition = %d ,key = %s, value = %s%n",
                            record.offset(), record.partition(), record.key(), record.value());
                }

                // Commit lastOffset + 1: the committed offset is the position of
                // the NEXT record to read.
                long lastOffset = pRecord.get(pRecord.size() - 1).offset();
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
                // Synchronous commit for just this partition.
                consumer.commitSync(offset);
                System.out.println("==========partition - "+partition +" ==========");
            }
        }
    }

    /**
     * Manual commit with manual partition control: assigns a single partition
     * (p1) of the topic and commits offsets for it explicitly. Note the seek to
     * 360 happens on every loop iteration, so the same records are re-read
     * forever — intentional for the demo.
     */
    private static void commitedOffsetWithPartition2() {
        Properties props = KafkaConsumerProperties();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);

        // hrsjw1_topic has two partitions: 0 and 1.
        TopicPartition p0 = new TopicPartition(TOPIC_NAME, 0);  // kept for the commented-out variant below
        TopicPartition p1 = new TopicPartition(TOPIC_NAME, 1);
        // Manually assign a single partition of the topic.
        consumer.assign(Arrays.asList(p1));
//        consumer.assign(Arrays.asList(p0,p1));
        while (true) {
            consumer.seek(p1, 360);
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition's records separately.
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);

                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("offset = %d, patition = %d ,key = %s, value = %s%n",
                            record.offset(), record.partition(), record.key(), record.value());
                }
                // Commit lastOffset + 1: the committed offset is the position of
                // the NEXT record to read.
                long lastOffset = pRecord.get(pRecord.size() - 1).offset();
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
                // Synchronous commit for just this partition.
                consumer.commitSync(offset);
                System.out.println("==========partition - "+partition +" ==========");
            }
        }
    }

    /**
     * Manual commit handled per partition: subscribes to the topic and, for
     * each partition in every batch, builds (but, as written, does not send —
     * the commit is commented out) a per-partition commit map.
     */
    private static void commitedOffsetWithPartition() {
        Properties props = KafkaConsumerProperties();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to one (or several) topics.
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            // Handle each partition's records separately.
            for (TopicPartition partition : records.partitions()) {
                List<ConsumerRecord<String, String>> pRecord = records.records(partition);

                for (ConsumerRecord<String, String> record : pRecord) {
                    System.out.printf("offset = %d, patition = %d ,key = %s, value = %s ,timestamp=%d%n",
                            record.offset(), record.partition(), record.key(), record.value(), record.timestamp());
                }
                // Offset of the last record in this partition's slice.
                long lastOffset = pRecord.get(pRecord.size() - 1).offset();
                // Per-partition commit map; the committed offset is lastOffset + 1.
                Map<TopicPartition, OffsetAndMetadata> offset = new HashMap<>();
                offset.put(partition, new OffsetAndMetadata(lastOffset + 1));
                // Commit is intentionally disabled in this demo.
//                consumer.commitSync(offset);
                System.out.println("consumed offset is "+ lastOffset);

//                OffsetAndMetadata offsetAndMetadata = consumer.committed();
//                System.out.println("commit offset is " + offsetAndMetadata.offset());
                System.out.println("==========partition - "+partition +" ==========");
            }
        }
    }

    /**
     * Manual offset commit: processes a batch (e.g. persist to a database),
     * then commits asynchronously only after the whole batch succeeded. On a
     * processing failure, skipping the commit means the batch is re-delivered.
     */
    private static void commitedOffset() {
        Properties props = KafkaConsumerProperties();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        TopicPartition tp = new TopicPartition(TOPIC_NAME, 0);

        // Manually assign partition 0 of the topic.
        consumer.assign(Arrays.asList(tp));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                // Real-world step: persist the record, e.g. to a database.
                // TODO record 2 db
                System.out.printf("offset = %d, patition = %d ,key = %s, value = %s%n",
                        record.offset(), record.partition(), record.key(), record.value());
                // On failure: roll back the work and do NOT commit the offset.
                //offset = 278, patition = 0 ,key = key_008, value = prefix1-value_008
            }
            // On success: acknowledge by committing the offsets asynchronously.
            consumer.commitAsync();
        }
    }

    /**
     * Simplest possible consumer: subscribe, poll forever, print. Relies on
     * whatever commit behavior the configuration provides — common in the wild
     * but not recommended.
     */
    private static void helloworld() {
        Properties props = KafkaConsumerProperties();
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
        // Subscribe to one (or several) topics.
        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("offset = %d, patition = %d ,key = %s, value = %s%n",
                        record.offset(), record.partition(), record.key(), record.value());
            }
        }
    }
}
