package com.li.kafka.producer;

import com.li.kafka.admin.AdminSample;
import org.apache.kafka.clients.producer.*;

import java.util.Properties;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

/**
 * Demonstrates the three KafkaProducer send styles: fire-and-forget (async),
 * blocking (sync via {@code Future.get()}), and async with a completion callback.
 *
 * <p>Not thread-safe: the lazy singleton in {@link #getProducer()} is unsynchronized
 * and is intended for single-threaded demo use from {@link #main(String[])} only.
 */
public class ProducerSample {
  // Lazily-created shared producer; reset to null after close so a later
  // demo call gets a fresh, usable instance instead of a closed one.
  private static KafkaProducer<String, String> producer;

  public static void main(String[] args) throws ExecutionException, InterruptedException {
    // Async fire-and-forget send
    //    ProducerSample.producerSend();
    // Sync (blocking) send
    //    ProducerSample.producerSendSync();

    // Async send with callback
    ProducerSample.producerSendCallback();
  }

  /** Async fire-and-forget send: results and errors are not observed. */
  public static void producerSend() {
    KafkaProducer<String, String> producer = ProducerSample.getProducer();

    for (int i = 0; i < 10; i++) {
      ProducerRecord<String, String> record =
          new ProducerRecord<>(AdminSample.TOPIC_NAME, "key" + i, "value" + i);
      producer.send(record);
    }
    // close() flushes buffered records before shutting down.
    producer.close();
    ProducerSample.producer = null; // discard closed instance
  }

  /**
   * Sync (blocking) send: blocks on each record's Future until the broker acks it.
   *
   * @throws ExecutionException if a send fails on the broker side
   * @throws InterruptedException if the calling thread is interrupted while waiting
   */
  public static void producerSendSync() throws ExecutionException, InterruptedException {
    KafkaProducer<String, String> producer = ProducerSample.getProducer();

    for (int i = 0; i < 10; i++) {
      ProducerRecord<String, String> record =
          new ProducerRecord<>(AdminSample.TOPIC_NAME, "key" + i, "value" + i);
      Future<RecordMetadata> send = producer.send(record);
      RecordMetadata recordMetadata = send.get(); // get() blocks until the send completes
      System.out.println(
          "partition:" + recordMetadata.partition() + ";offset:" + recordMetadata.offset());
    }
    producer.close();
    ProducerSample.producer = null; // discard closed instance
  }

  /** Async send with a callback: the callback reports either success metadata or the failure. */
  public static void producerSendCallback() {
    KafkaProducer<String, String> producer = ProducerSample.getProducer();

    for (int i = 0; i < 10; i++) {
      String key = "key" + i;
      ProducerRecord<String, String> record =
          new ProducerRecord<>(AdminSample.TOPIC_NAME, key, "value" + i);
      // Per the Callback contract, exactly one of (metadata, exception) is meaningful:
      // on failure the exception is non-null and the metadata must not be trusted.
      producer.send(
          record,
          (recordMetadata, e) -> {
            if (e != null) {
              System.err.println("send failed;key:" + key + ";error:" + e);
            } else {
              System.out.println(
                  "partition:"
                      + recordMetadata.partition()
                      + ";offset:"
                      + recordMetadata.offset()
                      + ";key:"
                      + key);
            }
          });
    }
    producer.close();
    ProducerSample.producer = null; // discard closed instance
  }

  /**
   * Returns the shared producer, creating it on first use.
   *
   * <p>Unsynchronized lazy init — safe only for single-threaded demo usage.
   *
   * @return a configured {@link KafkaProducer} for String keys and values
   */
  public static KafkaProducer<String, String> getProducer() {
    if (ProducerSample.producer == null) {
      Properties prop = new Properties();
      prop.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "1.15.61.173:9092");
      // "all": wait for the full ISR to ack each record (strongest durability).
      prop.setProperty(ProducerConfig.ACKS_CONFIG, "all");
      prop.setProperty(ProducerConfig.RETRIES_CONFIG, "0");
      prop.setProperty(ProducerConfig.BATCH_SIZE_CONFIG, "16384");
      prop.setProperty(ProducerConfig.LINGER_MS_CONFIG, "1");
      prop.setProperty(ProducerConfig.BUFFER_MEMORY_CONFIG, "33554432");
      // Custom partitioner routes records; see com.li.kafka.producer.SamplePartition.
      prop.setProperty(
          ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.li.kafka.producer.SamplePartition");

      prop.setProperty(
          ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,
          "org.apache.kafka.common.serialization.StringSerializer");
      prop.setProperty(
          ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,
          "org.apache.kafka.common.serialization.StringSerializer");

      ProducerSample.producer = new KafkaProducer<>(prop);
    }
    return ProducerSample.producer;
  }
}
