package com.middleware.kafka;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.Test;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

public class ProducerDemo {

    private static final String TOPIC = "msb-items";
    private static final String BOOTSTRAP_SERVERS =
            "192.168.0.111:9092,192.168.0.112:9092,192.168.0.113:9092";

    /**
     * Builds the common producer configuration shared by all demos.
     *
     * <p>Kafka is a persistence-oriented MQ that stores raw {@code byte[]} and
     * never interprets the payload — producer and consumer must agree on the
     * encoding, hence the String serializers for both key and value. (Kafka
     * itself uses the zero-copy {@code sendfile} syscall for fast consumption.)
     *
     * @param acks acknowledgement level: "0" fire-and-forget, "1" leader-only,
     *             "-1"/"all" wait for the full in-sync replica set
     * @return a fresh {@link Properties} ready for {@link KafkaProducer}
     */
    private static Properties producerProps(String acks) {
        Properties p = new Properties();
        p.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
        p.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        p.setProperty(ProducerConfig.ACKS_CONFIG, acks);
        return p;
    }

    /**
     * Sends one record synchronously (blocks on the returned {@link Future})
     * and prints key, value, and the partition/offset the broker assigned.
     *
     * @throws ExecutionException   if the send failed on the broker side
     * @throws InterruptedException if interrupted while waiting for the ack
     */
    private static void sendAndPrint(KafkaProducer<String, String> producer,
                                     ProducerRecord<String, String> record)
            throws InterruptedException, ExecutionException {
        RecordMetadata rm = producer.send(record).get();
        System.out.println("key: " + record.key() + " val: " + record.value()
                + " partition: " + rm.partition() + " offset: " + rm.offset());
    }

    /**
     * Topic + value only (no key, no partition): the default partitioner
     * spreads records across partitions itself. acks=0 (fire-and-forget).
     */
    @Test
    public void producer1() throws Exception {
        // try-with-resources: KafkaProducer is AutoCloseable and must be
        // closed to flush buffers and release network resources.
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(producerProps("0"))) {
            for (int i = 1; i < 100; i++) {
                sendAndPrint(producer, new ProducerRecord<>(TOPIC, "val" + i));
            }
        }
    }

    /**
     * Explicit target partition (0) with a null key: the partitioner is
     * bypassed and every record lands in partition 0. acks=0.
     */
    @Test
    public void producer2() throws Exception {
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(producerProps("0"))) {
            for (int i = 100; i < 300; i++) {
                sendAndPrint(producer, new ProducerRecord<>(TOPIC, 0, null, "value" + i));
            }
        }
    }

    /**
     * No explicit partition, but a key: the default partitioner hashes the
     * key, so records sharing a key ("test", "prod") go to the same partition
     * — and the two keys may well collide onto one partition.
     * acks=-1 (wait for the full in-sync replica set).
     *
     * <p>NOTE(review): the original code passed an explicit partition 0
     * alongside the key, which bypasses key hashing and contradicted the
     * stated intent; the partition argument has been dropped so hashing
     * actually applies.
     */
    @Test
    public void producer3() throws Exception {
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(producerProps("-1"))) {
            int i = 1000;
            for (; i < 1100; i++) {
                sendAndPrint(producer, new ProducerRecord<>(TOPIC, "test", "value" + i));
            }
            for (; i < 1200; i++) {
                sendAndPrint(producer, new ProducerRecord<>(TOPIC, "prod", "value" + i));
            }
        }
    }

    /**
     * Continuous key-partitioning demo against the 2-partition topic:
     * three item types (keys item0..item2), each with three sequential
     * values — records for the same item key should land in one partition.
     * Runs until the test is externally stopped.
     */
    @Test
    public void producer() throws Exception {
        // The producer actually talks to brokers, even though conceptually
        // we address a topic.
        try (KafkaProducer<String, String> producer =
                     new KafkaProducer<>(producerProps("-1"))) {
            while (true) {
                for (int i = 0; i < 3; i++) {
                    for (int j = 0; j < 3; j++) {
                        sendAndPrint(producer,
                                new ProducerRecord<>(TOPIC, "item" + j, "val" + i));
                    }
                }
            }
        }
    }
}
