package com.guchenbo.example.kafka;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

import java.time.Duration;
import java.util.Arrays;
import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;
import java.util.concurrent.atomic.AtomicInteger;

/**
 * Manual smoke-test harness for a local three-broker Kafka cluster
 * (localhost:9092-9094). Runs either a simple blocking producer or one or
 * more console-printing consumers; switch the mode by editing {@code main}.
 */
public class KafkaConsumerTest {

    /**
     * Entry point. Currently runs the multi-threaded consumer demo;
     * swap in {@code producer()} or {@code consumer()} to test those paths.
     */
    public static void main(String[] args) throws ExecutionException, InterruptedException {
        multiConsumer();
    }

    /**
     * Starts two consumers in the same consumer group on separate threads,
     * so partition assignment across group members can be observed.
     */
    private static void multiConsumer() {
        // Two tasks are submitted and each blocks forever in its poll loop,
        // so a pool of two threads is sufficient.
        ExecutorService executorService = Executors.newFixedThreadPool(2);
        for (int i = 0; i < 2; i++) {
            executorService.submit(KafkaConsumerTest::consumer);
        }
        // The consumer tasks never finish; stop accepting further work so the
        // pool can terminate if the tasks are ever interrupted.
        executorService.shutdown();
    }

    /**
     * Polls topic "part4" forever, printing a running per-thread record
     * counter followed by each record's value.
     */
    private static void consumer() {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
        properties.put("group.id", "test_log41");
        properties.put("max.poll.records", "1000");
        // Start from the earliest offset when the group has no committed offset.
        properties.put("auto.offset.reset", "earliest");
        properties.put("partition.assignment.strategy", "org.apache.kafka.clients.consumer.RoundRobinAssignor");
        properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");

        // try-with-resources closes the consumer (leaving the group cleanly)
        // if the loop ever exits via an exception from poll().
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(properties)) {
            kafkaConsumer.subscribe(Arrays.asList("part4"));

            int count = 0;
            while (true) {
                // Duration-based poll; the long-millis overload is deprecated.
                ConsumerRecords<String, String> records = kafkaConsumer.poll(Duration.ofMillis(10));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(count++);
                    System.out.println(record.value());
                }
            }
        }
    }

    /**
     * Sends one record to topic "part3" every three seconds, blocking on the
     * broker acknowledgement and printing the resulting metadata.
     *
     * @throws InterruptedException if the pacing sleep is interrupted
     * @throws ExecutionException   declared for signature compatibility; send
     *                              failures are caught and printed in the loop
     */
    private static void producer() throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        properties.put("bootstrap.servers", "localhost:9092,localhost:9093,localhost:9094");
        properties.put("acks", "all"); // wait for full acknowledgement from the cluster
        properties.put("retries", 0);
        properties.put("batch.size", 16384);
        properties.put("linger.ms", 1);
        properties.put("buffer.memory", 33554432);
        properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(properties);
        AtomicInteger cnt = new AtomicInteger();
        while (true) {
            try {
                Future<RecordMetadata> future =
                        kafkaProducer.send(new ProducerRecord<>("part3", "log gateway" + cnt.incrementAndGet()));
                // Block until the broker acks so failures surface immediately.
                RecordMetadata metadata = future.get();
                System.out.println("success send for " + metadata);
            } catch (Exception e) {
                e.printStackTrace();
            }
            Thread.sleep(3000);
        }
    }

}
