package com.example;

import org.apache.kafka.clients.consumer.*;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;

import java.time.Duration;
import java.util.*;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;
import java.util.stream.Collectors;

public class KafkaTest {

    /** Topic shared by the producer and consumer demos. */
    private static final String TOPIC = "msb-item";

    /**
     * Producer demo: endlessly sends batches of keyed records ("item0".."item2",
     * values "val0".."val2") to {@link #TOPIC} and prints the partition and offset
     * each record landed on. Runs forever; stop it manually.
     *
     * @throws ExecutionException   if a send fails on the broker side
     * @throws InterruptedException if the pacing sleep is interrupted
     */
    @Test
    public void producer() throws ExecutionException, InterruptedException {
        Properties p = new Properties();
        // NOTE(review): both entries are the same broker — the second host looks like
        // a copy/paste typo; confirm the intended cluster addresses.
        p.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.101:9092,192.168.1.101:9092");
        p.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Fix: "" is not a valid acks value (valid: "0", "1", "all") and makes the
        // KafkaProducer constructor throw a ConfigException. "all" waits for the
        // full in-sync replica set to acknowledge each record.
        p.put(ProducerConfig.ACKS_CONFIG, "all");

        // try-with-resources: close (and flush) the producer if the loop ever exits,
        // e.g. via an exception from send().get().
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {
            while (true) {
                for (int i = 0; i < 3; i++) {
                    for (int j = 0; j < 3; j++) {
                        ProducerRecord<String, String> record = new ProducerRecord<>(TOPIC, "item" + j, "val" + i);

                        // Block on the future so we can report where the record landed.
                        RecordMetadata metadata = producer.send(record).get();

                        int partition = metadata.partition();
                        long offset = metadata.offset();

                        System.out.println(record.key() + " " + record.value() + " partition=" + partition + " offset=" + offset);

                        // Pace the demo so the consumer output is readable.
                        Thread.sleep(1000);
                    }
                }
            }
        }
    }

    /**
     * Consumer demo: polls {@link #TOPIC} forever, prints each record, and commits
     * offsets manually — one synchronous commit per partition, after that
     * partition's slice of the batch has been fully processed.
     */
    @Test
    public void consumer() {

        Properties p = new Properties();
        p.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.1.101:9092,192.168.1.101:9092");
        p.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        p.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        p.put(ConsumerConfig.GROUP_ID_CONFIG, "XXOO3");
        /*
         * What to do when there is no initial offset in Kafka or if the current
         * offset does not exist any more on the server (e.g. because that data has
         * been deleted):
         *   - earliest: automatically reset the offset to the earliest offset
         *   - latest:   automatically reset the offset to the latest offset
         *   - none:     throw an exception if no previous offset exists for the group
         *   - anything else: throw an exception to the consumer
         */
        p.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
        // Fix: offsets are committed manually per partition below, so auto-commit
        // must be OFF — otherwise the background auto-commit races with (and can
        // overwrite) the manual commits, risking lost or duplicated processing.
        p.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");

        // try-with-resources: leave the consumer group cleanly if the loop exits.
        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(p)) {
            consumer.subscribe(Arrays.asList(TOPIC));

            while (true) {
                // Fix: poll(Duration.ZERO) returns immediately and busy-spins the CPU;
                // a short timeout lets the client block while waiting for data.
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(500));

                if (records.isEmpty()) {
                    continue;
                }

                System.out.println("=================================");

                records.partitions().forEach(partition -> {
                    List<ConsumerRecord<String, String>> partitionRecords = records.records(partition);

                    for (ConsumerRecord<String, String> r : partitionRecords) {
                        String key = r.key();
                        String value = r.value();
                        int part = r.partition();
                        long offset = r.offset();

                        System.out.println("key:" + key + "  value:" + value + "  partition:" + part + "  offset:" + offset);
                        try {
                            Thread.sleep(1500); // simulate per-record processing cost
                        } catch (InterruptedException e) {
                            // Fix: restore the interrupt flag instead of swallowing it,
                            // and stop processing this partition's slice.
                            Thread.currentThread().interrupt();
                            return;
                        }
                    }

                    // Per-partition synchronous commit.
                    // Fix 1: the committed offset must be last-processed + 1 (the NEXT
                    // offset to read); committing the raw offset re-delivers the last
                    // record after a restart or rebalance.
                    // Fix 2: the original built the offsets map but then called the
                    // no-arg commitSync(), which ignored the map and committed the
                    // whole poll batch instead.
                    long lastOffset = partitionRecords.get(partitionRecords.size() - 1).offset();
                    consumer.commitSync(Collections.singletonMap(partition, new OffsetAndMetadata(lastOffset + 1)));

                    System.out.println("------------------------");
                });

                System.out.println("=================================");
            }
        }
    }
}
