package com.link.serve.kafka.producer;

import com.link.serve.constants.LinkRedisKey;
import com.link.serve.kafka.factory.DynamicKafkaProducerFactory;
import com.link.serve.kafka.proto.KafkaMessageProto;
import com.link.serve.tcp.IMServerGroup;
import com.link.serve.tcp.message.Message;
import com.link.serve.tcp.message.proto.PackHeader;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.springframework.kafka.support.SendResult;
import org.springframework.stereotype.Component;
import org.springframework.util.concurrent.ListenableFuture;
import org.springframework.util.concurrent.ListenableFutureCallback;

import java.util.HashMap;
import java.util.Map;

import static com.link.serve.constants.KafkaConstant.IM_UPSTREAM_TOPIC;

@Slf4j
@Component
public class ImMessageProducer {

    /** Fully-qualified class name of the key serializer used for IM messages (from config). */
    @Value("${im.kafka.producer.key-serializer}")
    private String imKeySerializerClassName;

    /** Fully-qualified class name of the value serializer used for IM messages (from config). */
    @Value("${im.kafka.producer.value-serializer}")
    private String imValueSerializerClassName;

    /** Kafka bootstrap server address list (NOT redis — the original comment was wrong). */
    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;

    /**
     * Lazily-initialized, cached template. The original code built a new
     * ProducerFactory + KafkaTemplate on every send, leaking one Kafka producer
     * (sockets, I/O thread, buffers) per message; it is now created once and reused.
     */
    private volatile KafkaTemplate<String, Message> kafkaTemplate;

    /**
     * Sends an upstream IM message keyed by session id.
     *
     * @param sessionId session id; its string form becomes the Kafka record key
     * @param message   message payload
     * @return always {@code true}; see {@link #sendMessage(String, Message)}
     * @throws ClassNotFoundException if a configured serializer class cannot be loaded
     */
    public Boolean sendImMessage(Long sessionId, Message message) throws ClassNotFoundException {
        return sendMessage(sessionId.toString(), message);
    }

    /**
     * Sends a message to the IM upstream topic. No explicit partition is set:
     * Kafka's default partitioner hashes the key, so all messages of one session
     * land on the same partition (the topic is expected to have 3 partitions).
     *
     * @param key     record key (the session id)
     * @param message message payload
     * @return always {@code true} — the send is asynchronous and its outcome is
     *         only logged in the callback; callers must NOT treat the return
     *         value as a delivery acknowledgement
     * @throws ClassNotFoundException if a configured serializer class cannot be loaded
     */
    public Boolean sendMessage(String key, Message message) throws ClassNotFoundException {
        String kafkaTopic = IM_UPSTREAM_TOPIC; // different topics may carry different serializer configs

        KafkaTemplate<String, Message> template = obtainTemplate();

        // No partition argument: Kafka's default partitioner picks one from the key's hash.
        ProducerRecord<String, Message> record = new ProducerRecord<>(kafkaTopic, key, message);

        ListenableFuture<SendResult<String, Message>> future = template.send(record);

        future.addCallback(new ListenableFutureCallback<SendResult<String, Message>>() {
            @Override
            public void onSuccess(SendResult<String, Message> result) {
                // FIX: arguments now match the placeholders (topic, partition, offset, message);
                // the original passed them as (partition, offset, message, topic).
                log.info("上行推送到 IM 业务✅_topic={}_partition={}_offset={}_消息：{}",
                        kafkaTopic,
                        result.getRecordMetadata().partition(),
                        result.getRecordMetadata().offset(),
                        message);
            }

            @Override
            public void onFailure(Throwable ex) {
                // Pass the throwable as the trailing argument so SLF4J logs the stack trace
                // (the original logged only ex.getMessage() and lost it).
                log.error("msg_send_❌: type={}_topic={}_error={}",
                        message, kafkaTopic, ex.getMessage(), ex);
            }
        });
        return true;
    }

    /**
     * Builds the KafkaTemplate on first use and caches it (double-checked locking
     * on the volatile field), so the producer is created exactly once.
     *
     * @throws ClassNotFoundException if a configured serializer class cannot be loaded
     */
    private KafkaTemplate<String, Message> obtainTemplate() throws ClassNotFoundException {
        KafkaTemplate<String, Message> template = kafkaTemplate;
        if (template == null) {
            synchronized (this) {
                template = kafkaTemplate;
                if (template == null) {
                    Class<?> keySerializer = Class.forName(imKeySerializerClassName);
                    Class<?> valueSerializer = Class.forName(imValueSerializerClassName);
                    // Extra producer configs would go here; none are needed yet.
                    Map<String, Object> extraConfigs = new HashMap<>();
                    template = new KafkaTemplate<>(
                            DynamicKafkaProducerFactory.createImMessageProducerFactory(
                                    bootstrapServers, keySerializer, valueSerializer, extraConfigs));
                    kafkaTemplate = template;
                }
            }
        }
        return template;
    }
}
