package com.yz.kafka.service;

import com.yz.kafka.config.CustomKafkaConfig;
import com.yz.kafka.producers.CustomStudentSerializer;
import com.yz.kafka.producers.Student;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.IntegerSerializer;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.stereotype.Component;

import javax.annotation.Resource;
import java.time.Duration;
import java.time.temporal.ChronoUnit;
import java.util.Collections;
import java.util.Properties;

/**
 * @author yuanzheng
 * @version 1.0
 * @since 2020-12-24
 */
@Component
public class KafkaTestComponent {

    @Resource
    private CustomKafkaConfig customKafkaConfig;

    /**
     * Sends one message synchronously using a custom value serializer
     * ({@code CustomStudentSerializer}) and blocks until the broker acknowledges.
     * Errors are printed and swallowed (demo code).
     */
    public void producerSynAndCustomSerializer() {
        Properties properties = new Properties();
        // Key serializer
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Value serializer (custom serializer for Student)
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, CustomStudentSerializer.class.getName());
        // Broker cluster addresses
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, customKafkaConfig.getBrokerList());
        // Number of retries on transient send failures
        properties.put(ProducerConfig.RETRIES_CONFIG, 2);

        Student student = Student.builder().name("卡夫卡").address("郑州").build();
        ProducerRecord<String, Student> record =
                new ProducerRecord<>(customKafkaConfig.getTopic(), "kafka-demo", student);
        // try-with-resources: KafkaProducer implements Closeable, so the producer is
        // closed (and pending sends flushed) even when send()/get() throws.
        try (KafkaProducer<String, Student> producer = new KafkaProducer<>(properties)) {
            /* Synchronous send: get() blocks until the broker acknowledges */
            RecordMetadata metadata = producer.send(record).get();
            System.out.printf("topic=%s, partition=%d, offset=%s \n", metadata.topic(), metadata.partition(), metadata.offset());
        } catch (Exception e) {
            e.printStackTrace();
        }
    }

    /**
     * Sends messages asynchronously, routing them with a custom partitioner.
     * If the topic has only one partition, records routed to other partitions are lost.
     */
    public void producerAsynAndCustomPartition() {
        Properties properties = new Properties();
        // Key serializer
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, IntegerSerializer.class.getName());
        // Value serializer
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        // Broker cluster addresses
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, customKafkaConfig.getBrokerList());
        // Number of retries on transient send failures
        properties.put(ProducerConfig.RETRIES_CONFIG, 2);
        /* Plug in the custom partitioner */
        properties.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, "com.yz.kafka.producers.CustomPartitioner");
        /* Parameter consumed by the custom partitioner */
        properties.put("pass.line", 6);
        // try-with-resources closes the producer and flushes buffered records,
        // even when an exception escapes the loop.
        try (Producer<Integer, String> producer = new KafkaProducer<>(properties)) {
            for (int i = 0; i <= 10; i++) {
                String score = "score:" + i;
                ProducerRecord<Integer, String> record = new ProducerRecord<>(customKafkaConfig.getTopic(), i, score);
                /* Asynchronous send. Check the exception first: on failure the
                 * callback's metadata argument is null and would NPE. */
                producer.send(record, (metadata, exception) -> {
                    if (exception != null) {
                        exception.printStackTrace();
                    } else {
                        System.out.printf("%s, partition=%s, offset=%s \n", score, metadata.partition(), metadata.offset());
                    }
                });
            }
        }
    }

    /**
     * Consumer that commits offsets synchronously after each poll.
     * Auto-commit is disabled so {@link KafkaConsumer#commitSync()} actually
     * controls the committed position. Runs until the thread is stopped.
     */
    public void consumerSyn() {
        Properties properties = new Properties();
        // Key deserializer
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
        // Value deserializer
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Broker cluster addresses
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, customKafkaConfig.getBrokerList());
        /* Consumer group ID */
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, customKafkaConfig.getGroup());
        // Manual commit demo: disable auto-commit, otherwise commitSync() is moot
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Key type parameter must match IntegerDeserializer; declaring <String, ...>
        // here would throw ClassCastException when record.key() is read.
        KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(properties);
        /* Subscribe to the topic(s) */
        consumer.subscribe(Collections.singletonList(customKafkaConfig.getTopicCustomPartition()));
        try {
            while (true) {
                /* Poll for new records */
                ConsumerRecords<Integer, String> records = consumer.poll(Duration.of(100, ChronoUnit.MILLIS));
                for (ConsumerRecord<Integer, String> record : records) {
                    System.out.printf("topic = %s, partition = %d, key = %s, value = %s, offset = %d\n", record.topic(), record.partition(), record.key(), record.value(), record.offset());
                }
                /* Synchronous commit of the offsets just consumed */
                consumer.commitSync();
            }
        } finally {
            consumer.close();
        }
    }

    /**
     * Consumer that commits offsets asynchronously after each poll.
     * Auto-commit is disabled so {@link KafkaConsumer#commitAsync} actually
     * controls the committed position. Runs until the thread is stopped.
     */
    public void consumerAsyn() {
        Properties properties = new Properties();
        // Key deserializer
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class.getName());
        // Value deserializer
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        // Broker cluster addresses
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, customKafkaConfig.getBrokerList());
        /* Consumer group ID */
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, customKafkaConfig.getGroup());
        // Manual commit demo: disable auto-commit, otherwise commitAsync() is moot
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);
        // Key type parameter must match IntegerDeserializer (see consumerSyn)
        KafkaConsumer<Integer, String> consumer = new KafkaConsumer<>(properties);
        /* Subscribe to the topic(s) */
        consumer.subscribe(Collections.singletonList(customKafkaConfig.getTopicCustomPartition()));
        try {
            while (true) {
                /* Poll for new records */
                ConsumerRecords<Integer, String> records = consumer.poll(Duration.of(100, ChronoUnit.MILLIS));
                for (ConsumerRecord<Integer, String> record : records) {
                    System.out.printf("topic = %s, partition = %d, key = %s, value = %s, offset = %d\n", record.topic(), record.partition(), record.key(), record.value(), record.offset());
                }
                /* Asynchronous commit; the callback reports success or failure */
                consumer.commitAsync((info, exception) -> {
                    if (exception != null) {
                        System.out.println("错误处理");
                        // Do not swallow the failure silently
                        exception.printStackTrace();
                    } else {
                        info.forEach((topicPartition, offsetAndMetadata) ->
                                System.out.printf("topicPartition = %s, offsetAndMetadata = %s \n", topicPartition.toString(), offsetAndMetadata.toString())
                        );
                    }
                });
            }
        } finally {
            consumer.close();
        }
    }
}



