package com.zjl.Kafka.第03章_操作kafka;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;

import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeUnit;


public class 操作kafka {

    /**
     * Produces 10 000 string records ("key&lt;i&gt;" / "value&lt;i&gt;") to topic "java0011",
     * one per second, routed through a custom interceptor and partitioner.
     */
    @Test
    public void 生产者() {
        Map<String, Object> config = new HashMap<>();
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "82.157.71.243:9092"); // broker address
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); // key serializer
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName()); // value serializer
        config.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, B1_拦截器.class.getName()); // producer interceptor
        config.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, B2_自定义分区器.class.getName()); // custom partitioner

        // try-with-resources: the original never closed the producer when send()/sleep()
        // threw, leaking sockets and dropping buffered records. close() also flushes,
        // which replaces the original trailing 5-second "wait for delivery" sleep.
        try (KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(config)) {
            for (int i = 0; i < 10000; i++) {
                ProducerRecord<String, String> record =
                        new ProducerRecord<>("java0011", "key" + i, "value" + i);
                // send() is asynchronous; the original ignored delivery failures entirely.
                // The callback surfaces any broker-side error instead of losing it.
                kafkaProducer.send(record, (metadata, exception) -> {
                    if (exception != null) {
                        System.err.println("send failed for " + record + ": " + exception);
                    }
                });
                System.out.println(record);
                sleepSeconds(1); // throttle to one record per second
            }
        }
    }

    /**
     * Consumes from several demo topics in group "java0061", printing each record and
     * committing offsets manually. Loops until the JVM/test run is interrupted.
     */
    @Test
    public void 消费者() {
        Map<String, Object> config = new HashMap<>();
        config.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "82.157.71.243:9092"); // broker address
        config.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); // key deserializer
        config.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName()); // value deserializer
        config.put(ConsumerConfig.GROUP_ID_CONFIG, "java0061"); // consumer group id
        // The original called commitSync() but left enable.auto.commit at its default
        // (true), so the client auto-committed as well. Disable it so the manual
        // commit below is the only source of committed offsets.
        config.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        config.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest"); // start from earliest when no committed offset exists

        List<String> topics = new ArrayList<>();
        topics.add("java0011");
        topics.add("java0016");
        topics.add("zjl1");
        topics.add("x01");
        topics.add("x03");
        topics.add("zjl001");

        // try-with-resources: the poll loop never exits normally, so the original's
        // close() call (commented out) was unreachable. Now any exception escaping
        // the loop still closes the consumer and leaves the group cleanly.
        try (KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<>(config)) {
            kafkaConsumer.subscribe(topics);
            while (true) {
                ConsumerRecords<String, String> polls = kafkaConsumer.poll(Duration.ofMillis(500));
                System.out.println(polls.partitions().size());
                for (TopicPartition partition : polls.partitions()) {
                    for (ConsumerRecord<String, String> record : polls.records(partition)) {
                        System.out.println(record);
                    }
                }
                // Synchronous commit: blocks until offsets are stored on the broker.
                // commitAsync() would be the higher-throughput alternative.
                kafkaConsumer.commitSync();
            }
        }
    }

    /** Sleeps for {@code seconds}, restoring the interrupt flag if interrupted. */
    private static void sleepSeconds(long seconds) {
        try {
            TimeUnit.SECONDS.sleep(seconds);
        } catch (InterruptedException e) {
            // Re-interrupt so callers up the stack can still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException(e);
        }
    }

}
