package com.cat.net.network.kafka;

import ch.qos.logback.core.util.TimeUtil;
import com.cat.net.network.base.*;
import com.cat.net.network.kafka.serializer.KafkaPacketSerializer;
import com.cat.net.network.rpc.IResponseCallback;
import com.cat.net.network.rpc.RpcCallbackCache;
import com.cat.net.network.rpc.RpcCallbackHandler;
import com.cat.net.util.SerializationUtil;
import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.admin.NewTopic;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.DefaultKafkaProducerFactory;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.kafka.core.ProducerFactory;
import org.springframework.kafka.support.Acknowledgment;

import java.util.*;
import java.util.concurrent.TimeUnit;

/**
 * kafka服务, 由开发者注入此服务
 */
public class KafkaService {

    private final Logger logger = LoggerFactory.getLogger(this.getClass());

    @Autowired private KafkaConfig kafkaConfig;

    /**
     * Kafka message dispatcher; routes inbound packets to registered handlers.
     */
    @Autowired private KafkaDispatcher kafkaDispatcher;

    /**
     * Kafka template; handles serialization of outbound packets.
     */
    private KafkaTemplate<String, Packet> kafkaTemplate;

    /**
     * Producer factory backing {@link #kafkaTemplate}; kept so {@link #shutdown()}
     * can release the native producer clients it holds.
     */
    private ProducerFactory<String, Packet> producerFactory;

    /**
     * Request topics this service listens on.
     */
    private List<String> requestTopics = new ArrayList<>();

    /**
     * Response topics this service listens on.
     */
    private List<String> responseTopics = new ArrayList<>();

    /**
     * RPC callback timeout. Message-queue responses may take a long time,
     * so waits of up to 12 hours are supported.
     */
    private static final long TIMEOUT = TimeUnit.HOURS.toMillis(12);

    /**
     * Cache of pending RPC callbacks, keyed by sequence number.
     */
    protected final RpcCallbackCache callbackCache = new RpcCallbackCache();

    public KafkaService(List<String> requests, List<String> responses) {
        this.requestTopics = requests;
        this.responseTopics = responses;
    }

    // BUGFIX: the @KafkaListener SpEL expressions below ("#{__listener.requestTopics}",
    // "#{__listener.responseTopics}", "#{__listener.kafkaConfig...}") resolve properties
    // through public getters, not private fields — without these accessors the listener
    // container cannot evaluate the expressions at startup.
    public List<String> getRequestTopics() {
        return requestTopics;
    }

    public List<String> getResponseTopics() {
        return responseTopics;
    }

    public KafkaConfig getKafkaConfig() {
        return kafkaConfig;
    }

    /**
     * Initializes the Kafka service: builds the producer template and
     * ensures the request/response topics exist.
     */
    public void startup() {
        this.initTemplate();
        this.createTopics();
    }

    public void shutdown() {
        // BUGFIX: release producer resources on shutdown; the factory created in
        // initTemplate() holds native Kafka producer clients that must be closed.
        if (producerFactory instanceof DefaultKafkaProducerFactory) {
            ((DefaultKafkaProducerFactory<String, Packet>) producerFactory).destroy();
        }
    }

    /**
     * Sends a message without a callback; the protocol's sequence number
     * is left untouched.
     * @param topic destination topic
     * @param protocol user-defined message
     */
    public void sendMessage(String topic, IProtocol protocol) {
        String requestId = UUID.randomUUID().toString();
        Packet packet = Packet.encode(protocol);
        ProducerRecord<String, Packet> record = new ProducerRecord<>(topic, requestId, packet);
        kafkaTemplate.send(record);
    }

    /**
     * Sends a message with a callback; the callback is cached and invoked
     * when the matching response arrives on a response topic.
     * @param topic destination topic
     * @param protocol user-defined message
     * @param callback callback invoked with the response
     */
    public void sendMessage(String topic, IProtocol protocol, IResponseCallback<?> callback) {
        final long now = System.currentTimeMillis(), expiredTime = now + TIMEOUT;
        RpcCallbackHandler<?> futureCallback = new RpcCallbackHandler<>(expiredTime, callback);
        // A sequence number is generated when the callback is added to the cache.
        callbackCache.addCallback(futureCallback);
        // Stamp the protocol with that sequence number so the response can be matched.
        protocol.setSeq(futureCallback.getSeq());
        this.sendMessage(topic, protocol);
        // After sending, sweep any expired callbacks out of the cache.
        callbackCache.checkExpired(now);
    }

    /**
     * Handles an inbound request message by dispatching it to business logic.
     * Whether a response is sent back to the queue is decided by the business
     * layer, not here.
     * @param record consumed record whose value is the message packet
     * @param acknowledgment manual offset acknowledgment
     */
    @KafkaListener(topics = "#{__listener.requestTopics}", groupId = "#{__listener.kafkaConfig.getGroupId()}")
    public void receiveMessage(ConsumerRecord<String, Packet> record, Acknowledgment acknowledgment) {
        this.kafkaDispatcher.onReceiveMessage(record.value());
        acknowledgment.acknowledge();
    }

    /**
     * Handles an inbound response message by resolving the cached RPC callback.
     * Whether a further response is sent back to the queue is decided by the
     * business layer, not here.
     * @param record consumed record whose value is the response packet
     * @param acknowledgment manual offset acknowledgment
     */
    @KafkaListener(topics = "#{__listener.responseTopics}", groupId = "#{__listener.kafkaConfig.getGroupId()}")
    public void receiveResponse(ConsumerRecord<String, Packet> record, Acknowledgment acknowledgment) {
        Packet packet = record.value();
        int cmd = packet.cmd();
        if (cmd < 0) {
            logger.warn("Received inner message, cmd:[{}]", cmd);
            // BUGFIX: acknowledge filtered messages too — the original returned
            // without acknowledging, so the record was redelivered forever.
            acknowledgment.acknowledge();
            return;
        }
        RemoteCaller caller = this.kafkaDispatcher.getRemoteCaller(cmd);
        if (caller == null) {
            logger.warn("收到未处理协议, cmd=[{}]", cmd);
            // BUGFIX: same as above — never leave an unhandled record unacknowledged.
            acknowledgment.acknowledge();
            return;
        }
        Class<?> clazz = caller.getParamType();
        AbstractProtocol params = (AbstractProtocol) SerializationUtil.deserialize(packet.data(), clazz);
        callbackCache.receiveResponse(packet.seq(), packet.cmd(), params);
        acknowledgment.acknowledge();
    }

    /**
     * Builds the producer factory and the Kafka template from {@link #kafkaConfig}.
     */
    public void initTemplate() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfig.getBootstrapServers());
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, String.class.getName());
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, KafkaPacketSerializer.class.getName());
        props.put(ProducerConfig.RETRIES_CONFIG, kafkaConfig.getRetries());
        props.put(ProducerConfig.LINGER_MS_CONFIG, kafkaConfig.getLinger());
        this.producerFactory = new DefaultKafkaProducerFactory<>(props);
        this.kafkaTemplate = new KafkaTemplate<>(this.producerFactory);
    }

    /**
     * Creates the request/response topics that do not yet exist.
     * <p>
     * Kafka's {@code AdminClient.createTopics} does not re-create an existing topic,
     * but it does fail with {@code TopicExistsException}, so existing topics are
     * filtered out first.
     * <p>
     * {@code numPartitions}: 3–10 is a reasonable range; more partitions spread load
     * for high-concurrency processing.
     * {@code replicationFactor}: 2 means one extra replica (survives one broker
     * failure); 3 suits scenarios with stronger durability requirements.
     */
    public void createTopics() {
        long startTime = System.currentTimeMillis();
        Properties config = new Properties();
        config.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaConfig.getBootstrapServers());
        final int numPartitions = kafkaConfig.getPartition();
        final short replicationFactor = kafkaConfig.getReplicationFactor();
        try (AdminClient adminClient = AdminClient.create(config)) {
            // Existing topics — kept as a Set for O(1) membership checks.
            Set<String> existingTopics = adminClient.listTopics().names().get();

            // All topics this service needs.
            List<String> topics = new ArrayList<>(requestTopics);
            topics.addAll(responseTopics);

            // Create only the topics that are missing.
            List<NewTopic> newTopics = new ArrayList<>();
            for (String topicStr : topics) {
                if (existingTopics.contains(topicStr)) {
                    continue;
                }
                newTopics.add(new NewTopic(topicStr, numPartitions, replicationFactor));
            }
            // BUGFIX: skip the admin round trip entirely when nothing is missing.
            if (!newTopics.isEmpty()) {
                adminClient.createTopics(newTopics).all().get();
            }
        } catch (InterruptedException e) {
            // Restore interrupt status before failing.
            Thread.currentThread().interrupt();
            throw new RuntimeException("create topics failed", e);
        } catch (Exception e) {
            // BUGFIX: preserve the cause — the original dropped the stack trace.
            throw new RuntimeException("create topics failed", e);
        }
        logger.info("create topic cost time:{}", (System.currentTimeMillis() - startTime));
    }

}
