package com.zbj.storm.kafka;

import com.google.common.collect.Lists;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

import java.util.List;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

/**
 * KafkaProducer
 *
 * @author weigang
 * @create 2019-09-16
 **/
/**
 * Demo producer that publishes a fixed set of sentences to {@link #TOPIC} in an
 * endless loop, one record every three seconds, keyed by a random UUID.
 *
 * <p>NOTE(review): this class shadows {@code org.apache.kafka.clients.producer.KafkaProducer};
 * the fully-qualified name is used at the construction site below for that reason.
 */
public class KafkaProducer {

    private final Producer<String, String> producer;

    /** Destination topic, shared with the consumer side via {@code KafKaTopic}. */
    public final static String TOPIC = KafKaTopic.TOPIC;

    private KafkaProducer() {
        Properties properties = new Properties();

        // Kafka broker bootstrap addresses (host:port, comma-separated).
        properties.put("bootstrap.servers", "172.31.15.175:9092,172.31.15.175:9093,172.31.15.175:9094");

        // Serializer classes for record values and keys.
        properties.put("value.serializer", StringSerializer.class.getName());
        properties.put("key.serializer", StringSerializer.class.getName());

        // acks semantics:
        //   "0"        - producer never waits for a broker acknowledgement
        //                (lowest latency, weakest durability; data lost on broker failure).
        //   "1"        - acknowledged once the leader replica has the data
        //                (lost only if the leader dies before replication).
        //   "all"/"-1" - acknowledged once all in-sync replicas have the data
        //                (strongest durability).
        // FIX: "request.required.acks" is the legacy pre-0.9 producer key and is
        // silently ignored by the new Java client used here; the correct key is "acks".
        properties.put("acks", "1");

        producer = new org.apache.kafka.clients.producer.KafkaProducer<>(properties);
    }

    /**
     * Sends the sample sentences to {@link #TOPIC} forever, pausing 3 seconds
     * between records and logging each key/value pair to stdout.
     *
     * @throws Exception if the thread is interrupted while sleeping
     *                   (propagated as {@code InterruptedException})
     */
    void producer() throws Exception {
        List<String> sentenceList = Lists.newArrayList();
        sentenceList.add("So far we have been running against a single broker, but that's no fun. For Kafka, a single broker is just a cluster of size one, so nothing much changes other than starting a few more broker instances.");
        sentenceList.add("But just to get feel for it, let's expand our cluster to three nodes (still all on our local machine).");
        sentenceList.add("Writing data from the console and writing it back to the console is a convenient place to start, but you'll probably want to use data from other sources or export data from Kafka to other systems. For many systems, instead of writing custom integration code you can use Kafka Connect to import or export data.");
        sentenceList.add("Kafka Connect is a tool included with Kafka that imports and exports data to Kafka. It is an extensible tool that runs connectors, which implement the custom logic for interacting with an external system. In this quickstart we'll see how to run Kafka Connect with simple connectors that import data from a file to a Kafka topic and export data from a Kafka topic to a file.");
        sentenceList.add("Next, we'll start two connectors running in standalone mode, which means they run in a single, local, dedicated process. We provide three configuration files as parameters. The first is always the configuration for the Kafka Connect process, containing common configuration such as the Kafka brokers to connect to and the serialization format for data. The remaining configuration files each specify a connector to create. These files include a unique connector name, the connector class to instantiate, and any other configuration required by the connector.");

        // Intentional endless demo loop; terminate the process to stop producing.
        while (true) {
            for (String sentence : sentenceList) {
                String key = UUID.randomUUID().toString();
                producer.send(new ProducerRecord<>(TOPIC, key, sentence));
                System.out.println("key-> " + key + " value-> " + sentence);
                TimeUnit.SECONDS.sleep(3);
            }
        }
    }

    public static void main(String[] args) throws Exception {
        new KafkaProducer().producer();
    }

}