package com.yifeng.repo.controller.kafka.message;

import com.gomcarter.frameworks.base.common.AssertUtils;
import com.gomcarter.frameworks.base.exception.CustomException;
import com.google.common.base.Strings;
import com.yifeng.repo.base.utils.common.BaseUtil;
import com.yifeng.repo.controller.kafka.configure.KafkaProperties;
import com.yifeng.repo.controller.kafka.message.handler.KafkaHelper;
import com.yifeng.repo.controller.kafka.message.produce.OrderPartitioner;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.CommonClientConfigs;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.Cluster;
import org.apache.kafka.common.PartitionInfo;
import org.apache.kafka.common.config.SaslConfigs;
import org.apache.kafka.common.security.auth.SecurityProtocol;
import org.apache.kafka.common.serialization.StringSerializer;

import java.nio.charset.StandardCharsets;
import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

import static com.yifeng.repo.controller.kafka.message.constant.KafkaConstant.PROPERTY_TAGS;
import static com.yifeng.repo.controller.kafka.message.constant.KafkaConstant.SASL_MECHANISM_VALUE;

/**
 * Created by daibing on 2024/4/29.
 */
@Slf4j
public class KafkaProduceManager {
    /** topic -> producer cache; producers are created lazily and closed in {@link #destroy()}. */
    protected final ConcurrentMap<String, KafkaProducer<String, String>> topic2Producer = new ConcurrentHashMap<>();
    protected final String applicationName;
    protected final KafkaProperties properties;

    public KafkaProduceManager(String applicationName, KafkaProperties properties) {
        this.applicationName = applicationName;
        this.properties = properties;
    }

    public void init() {
        log.info("yfcloud controller Kafka ProduceManager init success: OWNER_ID={}, OWNER_NAME={}", BaseUtil.OWNER_ID, BaseUtil.OWNER_NAME);
    }

    public void destroy() {
        // Close every cached producer; a failure on one must not prevent closing the rest.
        topic2Producer.forEach((topic, producer) -> {
            try {
                producer.close();
            } catch (Exception e) {
                log.warn("close producer failed: topic={}", topic, e);
            }
        });
        topic2Producer.clear();
        log.info("yfcloud controller Kafka ProduceManager destroy success: OWNER_ID={}, OWNER_NAME={}", BaseUtil.OWNER_ID, BaseUtil.OWNER_NAME);
    }

    /**
     * Sends a message synchronously.
     *
     * @param topic   message topic
     * @param tag     message tag, used by consumers for server-side message filtering
     * @param key     business-level unique identifier of the message; must be unique so the
     *                message can be looked up by key
     * @param body    message body
     * @param headers message headers
     * @return metadata of the record acknowledged by the broker
     */
    public RecordMetadata sendMsg(String topic, String tag, String key, String body, Map<String, String> headers) {
        KafkaProducer<String, String> producer = getProducer(topic);
        ProducerRecord<String, String> msg = new ProducerRecord<>(topic, key, body);
        fillHeaders(msg, tag, headers);
        return doSend(producer, msg);
    }

    public RecordMetadata sendMsg(String topic, String tag, String key, String body) {
        return sendMsg(topic, tag, key, body, Collections.emptyMap());
    }

    public RecordMetadata sendMsg(String topic, String key, String body) {
        return sendMsg(topic, null, key, body, Collections.emptyMap());
    }

    /**
     * Sends an ordered message synchronously. Messages with the same routing argument land on
     * the same partition, which preserves their relative order.
     *
     * @param topic    message topic
     * @param tag      message tag, used by consumers for server-side message filtering
     * @param key      business-level unique identifier of the message; must be unique
     * @param body     message body
     * @param headers  message headers
     * @param orderArg routing argument that selects the partition; identical arguments map to the
     *                 same partition. Falls back to tag, then key, when absent.
     * @return metadata of the record acknowledged by the broker
     */
    public RecordMetadata sendOrderMsg(String topic, String tag, String key, String body, Map<String, String> headers, Object orderArg) {
        KafkaProducer<String, String> producer = getProducer(topic);
        Integer partition = getPartition(topic, tag, key, orderArg, producer);
        ProducerRecord<String, String> msg = new ProducerRecord<>(topic, partition, key, body);
        fillHeaders(msg, tag, headers);
        return doSend(producer, msg);
    }

    public RecordMetadata sendOrderMsg(String topic, String tag, String key, String body, Object orderArg) {
        return sendOrderMsg(topic, tag, key, body, Collections.emptyMap(), orderArg);
    }

    /**
     * Ordered send without an explicit routing argument; routing falls back to tag, then key.
     */
    public RecordMetadata sendOrderMsg(String topic, String tag, String key, String body) {
        return sendOrderMsg(topic, tag, key, body, Collections.emptyMap(), null);
    }

    public RecordMetadata sendOrderMsg(String topic, String key, String body) {
        return sendOrderMsg(topic, null, key, body, Collections.emptyMap(), null);
    }

    /**
     * Attaches the tag (if any) and the user headers to the record, UTF-8 encoded.
     */
    private void fillHeaders(ProducerRecord<String, String> msg, String tag, Map<String, String> headers) {
        if (!Strings.isNullOrEmpty(tag)) {
            msg.headers().add(PROPERTY_TAGS, tag.getBytes(StandardCharsets.UTF_8));
        }
        if (headers != null) {
            headers.forEach((k, v) -> msg.headers().add(k, v.getBytes(StandardCharsets.UTF_8)));
        }
    }

    /**
     * Sends the record and waits for the broker acknowledgement.
     * Restores the thread's interrupt flag if the wait is interrupted, so callers up the
     * stack can still observe the interruption.
     */
    private RecordMetadata doSend(KafkaProducer<String, String> producer, ProducerRecord<String, String> msg) {
        try {
            return producer.send(msg).get();
        } catch (InterruptedException e) {
            // re-interrupt: swallowing the flag would hide the interruption from callers
            Thread.currentThread().interrupt();
            throw new CustomException(e);
        } catch (Throwable t) {
            throw new CustomException(t);
        }
    }

    /**
     * Resolves the target partition for an ordered message, or {@code null} to delegate the
     * choice to a configured {@link OrderPartitioner}.
     * Routing priority: orderArg, then tag, then key.
     *
     * @see RoundRobinPartitioner#partition(String, Object, byte[], Object, byte[], Cluster)
     */
    private Integer getPartition(String topic, String tag, String key, Object orderArg, KafkaProducer<String, String> producer) {
        // 如果启用自定义顺序分区器，就不再直接指定分区
        KafkaProperties.TopicProperties topicProperties = KafkaHelper.getTopicProperties(properties, topic);
        if (topicProperties.getCustomPartitionerClass() != null
                && OrderPartitioner.class.isAssignableFrom(topicProperties.getCustomPartitionerClass())) {
            return null;
        }

        // 获取 partitionNum
        int partitionNum = producer.partitionsFor(topic).size();
        AssertUtils.isTrue(partitionNum > 0, topic + " partitionNum 错误：" + partitionNum);

        // "& Integer.MAX_VALUE" clears the sign bit so hashCode() == Integer.MIN_VALUE
        // cannot yield a negative partition index.
        // 优先按照arg排序
        if (orderArg != null) {
            return (orderArg.hashCode() & Integer.MAX_VALUE) % partitionNum;
        }

        // 其次使用tag作为路由键
        if (!Strings.isNullOrEmpty(tag)) {
            return (tag.hashCode() & Integer.MAX_VALUE) % partitionNum;
        }

        // 默认使用key作为路由键；key 为空则无法路由，给出明确错误而不是 NPE
        AssertUtils.isTrue(key != null, topic + " 顺序消息缺少路由键：orderArg、tag、key 均为空");
        return (key.hashCode() & Integer.MAX_VALUE) % partitionNum;
    }

    /**
     * 查询指定topic的分区信息
     */
    public List<PartitionInfo> listPartitionInfo(String topic) {
        // Use getProducer (not a raw map lookup): a topic whose producer was never created
        // would otherwise NPE here. getProducer either returns/creates the producer or
        // throws a descriptive exception.
        KafkaProducer<String, String> producer = getProducer(topic);
        return new ArrayList<>(producer.partitionsFor(topic));
    }

    /**
     * 获取分区的统计数据
     *
     * @param topic     消息主题
     * @param timestamp 开始时间，单位毫秒
     * @return partition -> offset
     */
    public Map<Integer, Long> listPartitionStats(String topic, long timestamp) {
        throw new CustomException("暂不支持查询分区统计数据");
    }

    /**
     * Returns the cached producer for the topic, creating it on first use.
     * {@code computeIfAbsent} on the ConcurrentHashMap guarantees at most one producer is
     * built per topic, replacing the previous hand-rolled double-checked locking.
     */
    protected KafkaProducer<String, String> getProducer(String topic) {
        if (properties.isMasterTopicOnlyProduce() && !properties.getMasterTopic().equals(topic)) {
            throw new CustomException("当前仅支持发布消息到应用自己的topic：" + topic);
        }
        return topic2Producer.computeIfAbsent(topic, this::buildNewProducer);
    }

    /**
     * Builds a new SASL_PLAINTEXT producer configured from the topic's properties.
     */
    protected KafkaProducer<String, String> buildNewProducer(String topic) {
        KafkaProperties.TopicProperties topicProperties = KafkaHelper.getTopicProperties(properties, topic);

        // named "config" (not "properties") to avoid shadowing the KafkaProperties field
        Properties config = new Properties();
        config.put(CommonClientConfigs.SECURITY_PROTOCOL_CONFIG, SecurityProtocol.SASL_PLAINTEXT.name);
        config.put(SaslConfigs.SASL_MECHANISM, SASL_MECHANISM_VALUE);
        config.put(SaslConfigs.SASL_JAAS_CONFIG, KafkaHelper.getProducerAuthContent(topicProperties));
        config.put(ProducerConfig.CLIENT_ID_CONFIG, applicationName + "-" + topic + "-producer");
        config.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, topicProperties.getNamesrvAddr());
        config.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
        config.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, topicProperties.getSendMsgTimeout());
        config.put(ProducerConfig.RETRIES_CONFIG, topicProperties.getRetryTimes());
        config.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, topicProperties.getMaxMessageSize());
        config.put(ProducerConfig.BATCH_SIZE_CONFIG, topicProperties.getBatchSize());
        config.put(ProducerConfig.LINGER_MS_CONFIG, topicProperties.getLingerMs());
        config.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, topicProperties.isEnableIdempotence());
        if (topicProperties.getCustomPartitionerClass() != null) {
            config.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, topicProperties.getCustomPartitionerClass().getName());
        }

        KafkaProducer<String, String> producer = new KafkaProducer<>(config);
        log.info("buildNewProducer success: {}, namesrvAddr={}, sendMsgTimeout={}, maxMessageSize={}",
                topic, topicProperties.getNamesrvAddr(), topicProperties.getSendMsgTimeout(), topicProperties.getMaxMessageSize());
        return producer;
    }

}
