package com.middleware.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;

import java.util.Properties;
import java.util.concurrent.Future;

public class Producer1 {

    /** Topic both demo producers write to, so their records interleave in one partition. */
    private static final String TOPIC = "msb-items";

    /** Upper bound on records per run so the test terminates (the original looped forever). */
    private static final int MAX_RECORDS = 1000;

    // Demo: multiple producers writing into the same partition interleave their records,
    // and with acks=0 the broker returns no metadata, so the reported offset is -1.
    @Test
    public void producer1() throws Exception {
        sendRecords("tom");
    }

    @Test
    public void producer2() throws Exception {
        sendRecords("jack");
    }

    /**
     * Sends {@link #MAX_RECORDS} string records ("prefix1", "prefix2", ...) to partition 0
     * of {@link #TOPIC} and prints the {@link RecordMetadata} observed for each send.
     *
     * @param valuePrefix prefix for each record value; the loop counter is appended
     * @throws Exception if waiting on a send future fails
     */
    private void sendRecords(String valuePrefix) throws Exception {
        Properties p = new Properties();
        p.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "192.168.0.111:9092,192.168.0.112:9092,192.168.0.113:9092");
        // Kafka is a persistent MQ that stores raw byte[]; it never interprets the payload,
        // so producer and consumer must agree on the encoding/decoding themselves.
        // (Kafka serves consumers efficiently via the sendfile zero-copy system call.)
        p.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // acks=0: fire-and-forget — the broker sends no ack, so RecordMetadata.offset() is -1.
        p.setProperty(ProducerConfig.ACKS_CONFIG, "0");

        // try-with-resources guarantees the producer is flushed and closed even when a
        // send fails (the original leaked the producer and never exited the loop).
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(p)) {
            for (int i = 1; i <= MAX_RECORDS; i++) {
                // Key is null: with an explicit partition (0) the partitioner is bypassed anyway.
                ProducerRecord<String, String> record =
                        new ProducerRecord<>(TOPIC, 0, null, valuePrefix + i);
                Future<RecordMetadata> send = producer.send(record);

                RecordMetadata rm = send.get();
                System.out.println("key: " + record.key() + " val: " + record.value()
                        + " partition: " + rm.partition() + " offset: " + rm.offset());
            }
        }
    }

}
