/**
 * KafkaDriver — message-queue driver backed by an Apache Kafka producer.
 *
 * @author yichen
 * @version 1.0.0
 * @since 2023/9/23
 */
package io.gitee.wokaixin.yunque.driver;

import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.*;
import org.springframework.stereotype.*;

import java.util.*;

//@Service
/**
 * Kafka-backed {@code Driver} that publishes raw byte messages to a
 * configurable topic and reports produce results via the Kafka
 * {@code Callback} interface.
 *
 * <p>NOTE(review): not thread-safe — {@link #setTopic(String)} mutates shared
 * state; confirm single-threaded use by callers.
 */
public class KafkaDriver implements Driver, Callback {
    /** Default topic used by {@link #sendTo(byte[])}. */
    private String topic;

    /** Shared producer; kept open for the lifetime of this driver. */
    KafkaProducer<String, Object> producer;

    // Not final: the producer-injecting constructor leaves it unset.
    private KafkaConfig kafkaConfig;

    /**
     * Builds a driver whose producer is bootstrapped from {@code kafkaConfig}.
     *
     * @param kafkaConfig supplies the broker list for the producer
     */
    public KafkaDriver(KafkaConfig kafkaConfig) {
        this.kafkaConfig = kafkaConfig;
        this.producer = kafkaProducer();
    }

    /**
     * Builds a driver around an externally created producer (e.g. for tests).
     *
     * @param producer a ready-to-use Kafka producer
     */
    public KafkaDriver(KafkaProducer<String, Object> producer) {
        this.producer = producer;
    }

    /**
     * Sets the default topic for {@link #sendTo(byte[])}.
     *
     * @return {@code this}, for call chaining
     */
    public KafkaDriver setTopic(String topic) {
        this.topic = topic;
        return this;
    }

    /**
     * Creates a Kafka producer configured from {@link #kafkaConfig}.
     *
     * <p>Both key and value are serialized as raw bytes. Records are produced
     * with a null key (see {@link #sendTo(byte[], String)}), so Kafka chooses
     * the partition itself and the key serializer is never exercised.
     */
    private KafkaProducer<String, Object> kafkaProducer() {
        Properties properties = new Properties();
        // Comma-separated list of bootstrap brokers.
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfig.getBrokerList());
        // Brokers accept binary payloads only, so key and value both need
        // serializers; byte-array serialization matches the byte[] messages.
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class.getName());
        return new KafkaProducer<>(properties);
    }

    /**
     * Sends {@code msg} to the default topic configured via {@link #setTopic(String)}.
     *
     * @return true if the record was handed to the producer
     */
    @Override
    public boolean sendTo(byte[] msg) {
        return sendTo(msg, this.topic);
    }

    /**
     * Produce-result callback: logs the delivery outcome.
     *
     * @param recordMetadata metadata of the produced record; may be null on failure
     * @param e              null on success, otherwise the produce error
     */
    @Override
    public void onCompletion(RecordMetadata recordMetadata, Exception e) {
        if (e == null) {
            System.out.println("主题为：" + recordMetadata.topic() + " 分区为：" + recordMetadata.partition());
        } else {
            // FIX: recordMetadata can be null when the send fails — the
            // original dereferenced it unconditionally and would NPE here.
            String where = recordMetadata == null
                    ? "unknown"
                    : recordMetadata.topic() + " 分区为：" + recordMetadata.partition();
            System.out.println("失败" + "主题为：" + where);
            // FIX: keep the failure cause instead of discarding it.
            e.printStackTrace();
        }
    }

    /**
     * Sends {@code msg} to {@code topic} with a null key (Kafka picks the
     * partition).
     *
     * @return true if the record was handed to the producer, false on error
     */
    @Override
    public boolean sendTo(byte[] msg, String topic) {
        try {
            // arg1: topic, arg2: the actual message payload (null key).
            ProducerRecord<String, Object> record = new ProducerRecord<>(topic, msg);
            producer.send(record, this);
            // FIX: do NOT close() the shared producer here — the original
            // closed it after every send, so any subsequent sendTo() failed.
            // flush() still forces the record out before reporting success.
            producer.flush();
            return true;
        } catch (Exception e) {
            // FIX: preserve the stack trace instead of println(e).
            e.printStackTrace();
        }
        return false;
    }
}
