package com.gjy.kafka.java;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import org.apache.kafka.common.serialization.StringSerializer;
import org.junit.jupiter.api.Test;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.util.Objects;
import java.util.Properties;
import java.util.concurrent.ExecutionException;

/**
 * @author gjy
 * @version 1.0
 * @since 2024-06-19 22:16:14
 */
public class ProducerTest {

    private static final Logger log = LoggerFactory.getLogger(ProducerTest.class);

    /**
     * Builds the producer configuration shared by most tests in this class:
     * bootstrap servers plus String key/value serializers taken from {@link KafkaConfig}.
     * Individual tests add or override entries as needed.
     */
    private static Properties baseProperties() {
        Properties properties = new Properties();
        properties.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, KafkaConfig.BOOTSTRAP_SERVER);
        properties.setProperty(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, KafkaConfig.KEY_SERIALIZER);
        properties.setProperty(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaConfig.VALUE_SERIALIZER);
        return properties;
    }

    /**
     * Sends one record whose key is serialized by the custom {@code AlterKeySerde}
     * and blocks until the broker acknowledges it ({@code acks=all}).
     *
     * @throws ExecutionException   if the send fails on the broker side
     * @throws InterruptedException if the blocking {@code get()} is interrupted
     */
    @Test
    public void test12() throws ExecutionException, InterruptedException {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.253.131:9098,192.168.253.131:9097,192.168.253.131:9099");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, AlterKeySerde.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "3");
        // One in-flight request per connection preserves ordering across retries.
        properties.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");

        // try-with-resources: KafkaProducer is Closeable; the original leaked it.
        try (KafkaProducer<Alter, String> producer = new KafkaProducer<>(properties)) {
            Alter alter = new Alter("0", "stage 0", "0", "0-m");
            ProducerRecord<Alter, String> record = new ProducerRecord<>("first", alter, alter.getMessage());
            // send() is asynchronous; get() makes this a synchronous send.
            RecordMetadata metadata = producer.send(record).get();
            log.info("{}, {}, {}, {}", metadata.topic(), metadata.timestamp(), metadata.partition(), metadata.offset());
        }
    }

    /**
     * Synchronous send of a single String record with {@code acks=all}, retries,
     * and strict in-flight ordering, then logs the returned metadata.
     *
     * @throws Exception if the send fails or is interrupted
     */
    @Test
    public void test11() throws Exception {
        Properties properties = new Properties();
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "192.168.253.131:9098,192.168.253.131:9097,192.168.253.131:9099");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        properties.put(ProducerConfig.ACKS_CONFIG, "all");
        properties.put(ProducerConfig.RETRIES_CONFIG, "3");
        properties.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "1");

        // try-with-resources: the original never closed this producer.
        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            RecordMetadata metadata = producer.send(new ProducerRecord<>("first", "sender uu")).get();
            log.info("{}, {}, {}, {}", metadata.topic(), metadata.timestamp(), metadata.partition(), metadata.offset());
        }
    }

    /**
     * Creates a Kafka producer and sends records to the broker asynchronously
     * (fire-and-forget; {@code close()} flushes any buffered records).
     */
    @Test
    public void test1() {
        Properties properties = baseProperties();
        // Tunables intentionally left at their defaults here (see test7/test8 for examples):
        // ACKS_CONFIG, BATCH_SIZE_CONFIG, BUFFER_MEMORY_CONFIG, LINGER_MS_CONFIG,
        // MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, RETRIES_CONFIG, RETRY_BACKOFF_MS_CONFIG,
        // ENABLE_IDEMPOTENCE_CONFIG, COMPRESSION_TYPE_CONFIG.

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                producer.send(new ProducerRecord<>("first", "sender " + i));
            }
        }
    }

    /**
     * Asynchronous send with a completion callback: logs topic/partition on success,
     * logs the failure (with stack trace) otherwise.
     */
    @Test
    public void test2() {
        Properties properties = baseProperties();

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                producer.send(new ProducerRecord<>("first", "kafka-producer- " + i), (metadata, e) -> {
                    if (Objects.isNull(e)) { // send succeeded
                        log.info("主题: {}, 分区: {}", metadata.topic(), metadata.partition());
                    } else {
                        // Use the class logger instead of printStackTrace().
                        log.error("send failed", e);
                    }
                });
            }
        }
    }

    /**
     * Synchronous send: {@code get()} blocks until each record is acknowledged.
     *
     * @throws ExecutionException   if a send fails on the broker side
     * @throws InterruptedException if the blocking {@code get()} is interrupted
     */
    @Test
    public void test3() throws ExecutionException, InterruptedException {
        Properties properties = baseProperties();

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                producer.send(new ProducerRecord<>("first", "kafka-producer- " + i)).get();
            }
        }
    }

    /**
     * Sends to an explicitly chosen partition (partition 0, empty key).
     */
    @Test
    public void test4() {
        Properties properties = baseProperties();

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                // 0 is the target partition, "" is the record key.
                producer.send(new ProducerRecord<>("first", 0, "", "kafka-producer- " + i));
            }
        }
    }

    /**
     * Sends keyed records without a partition: the partition is derived from
     * hash(key) modulo the topic's partition count.
     */
    @Test
    public void test5() {
        Properties properties = baseProperties();

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                // No partition given: partition = hash(key) % numPartitions.
                producer.send(new ProducerRecord<>("first", "a", "kafka-producer- " + i));
            }
        }
    }

    /**
     * Same as {@link #test5()} but routes records through a custom
     * {@code Partitioner} implementation.
     */
    @Test
    public void test6() {
        Properties properties = baseProperties();
        // Plug in the custom partitioner.
        properties.setProperty(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.gjy.kafka.simple.PartitionerImpl");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                producer.send(new ProducerRecord<>("first", "a", "kafka-producer- " + i));
            }
        }
    }

    /**
     * Throughput tuning example: batch size, linger time, snappy compression
     * and a larger send buffer.
     */
    @Test
    public void test7() {
        Properties properties = baseProperties();

        properties.put(ProducerConfig.BATCH_SIZE_CONFIG, 16384);      // 16 KiB batches
        properties.put(ProducerConfig.LINGER_MS_CONFIG, 1);           // wait up to 1 ms to fill a batch
        properties.setProperty(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        properties.put(ProducerConfig.BUFFER_MEMORY_CONFIG, 33554432); // 32 MiB buffer

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                producer.send(new ProducerRecord<>("first", "a", "kafka-producer- " + i));
            }
        }
    }

    /**
     * Durability tuning example: {@code acks=all} plus a bounded retry count.
     */
    @Test
    public void test8() {
        Properties properties = baseProperties();

        properties.put(ProducerConfig.ACKS_CONFIG, "all"); // wait for all in-sync replicas
        properties.put(ProducerConfig.RETRIES_CONFIG, 3);  // retry up to 3 times

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                producer.send(new ProducerRecord<>("first", "a", "kafka-producer- " + i));
            }
        }
    }

    /**
     * Transactional send: records are committed atomically, or the transaction
     * is aborted if any send fails.
     */
    @Test
    public void test9() {
        Properties properties = baseProperties();

        // The transactional id may be any value, but must be unique per producer instance.
        properties.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "transaction_id_0");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            producer.initTransactions();
            producer.beginTransaction();
            try {
                for (int i = 0; i < 3; i++) {
                    producer.send(new ProducerRecord<>("first", "a", "kafka-producer- " + i));
                }
                producer.commitTransaction();
            } catch (Exception e) {
                // Log the cause before aborting; the original discarded it silently.
                log.error("transaction failed, aborting", e);
                producer.abortTransaction();
            }
        }
    }

    /**
     * Sends keyed records through the custom partitioner, logging (not printing)
     * any send-time failure.
     */
    @Test
    public void test10() {
        Properties properties = baseProperties();
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.gjy.kafka.simple.PartitionerImpl");

        try (KafkaProducer<String, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i < 3; i++) {
                producer.send(new ProducerRecord<>("first", "a", "kafka-producer-p" + i));
            }
        } catch (Exception e) {
            // Use the class logger instead of printStackTrace().
            log.error("send failed", e);
        }
    }

}
