package com.lry;

import com.alibaba.fastjson.JSONObject;
import com.lry.common.CommonConstants;
import com.lry.config.BrokerConfig;
import com.lry.config.MessageStoreConfig;
import com.lry.config.StorePathConfigHelper;
import com.lry.filter.DelayMessageFilter;
import com.lry.filter.MessageFilter;
import com.lry.filter.TagMessageFilter;
import com.lry.lock.SegmentLock;
import com.lry.message.*;
import com.lry.persist.ConsumeQueuePersist;
import com.lry.persist.TopicPersist;
import com.lry.producer.SendResult;
import com.lry.producer.SendStatus;
import com.lry.selector.MessageSelector;
import com.lry.selector.RandomMessageSelector;
import com.lry.thread.BrokerThreadFactory;
import com.lry.topic.TopicPublishInfo;
import lombok.SneakyThrows;
import org.apache.commons.lang3.StringUtils;

import java.util.*;
import java.util.concurrent.*;
import java.util.concurrent.atomic.AtomicLong;

/**
 * Handles all client-side requests: producing messages, pulling (consuming)
 * messages, acks, topic queries, and heartbeat-based eviction of dead
 * consumers with redistribution of their queues.
 */
public class BrokerController {

    // topic -> the message queues of that topic
    private Map<String, List<MessageQueue>> topicQueue;

    // <topic@group, <consumer, indexes of the queues assigned to that consumer>>
    private Map<String, Map<String, List<Integer>>> consumeQueue;

    // <topic@group:consumer, timestamp of the last heartbeat>
    private Map<String, Long> heartBeatMap = new ConcurrentHashMap<>();

    private ScheduledExecutorService scheduledExecutorService = Executors.newScheduledThreadPool(1);

    private MessageSelector messageSelector = new RandomMessageSelector();

    private BrokerConfig brokerConfig;

    private DefaultMessageStore messageStore;

    private MessageStoreConfig messageStoreConfig;

    private SegmentLock<String> segmentLock = new SegmentLock<>();

    // consumers whose last heartbeat is older than this window (ms) are evicted
    private long heartbeatTimeout = 2000;

    private MessageFilter tagMessageFilter = new TagMessageFilter();

    private MessageFilter delayMessageFilter = new DelayMessageFilter();

    private boolean master;

    ThreadPoolExecutor exec = new ThreadPoolExecutor
            (30,
                    100,
                    5,
                    TimeUnit.SECONDS,
                    new LinkedBlockingQueue<Runnable>(),
                    BrokerThreadFactory.factory,
                    new ThreadPoolExecutor.AbortPolicy());

    /**
     * Loads the persisted topic and consume-queue state from disk, starts
     * their background persisters, and starts the message store.
     *
     * @throws Exception if any persisted state fails to load
     */
    public BrokerController(BrokerConfig brokerConfig, MessageStoreConfig messageStoreConfig) throws Exception {
        this.brokerConfig = brokerConfig;
        this.messageStoreConfig = messageStoreConfig;

        TopicPersist topicPersist = new TopicPersist(StorePathConfigHelper.getTopicStorePath(this.messageStoreConfig.getStorePathRootDir()));
        topicPersist.load();
        this.topicQueue = topicPersist.getTopicQueue();
        topicPersist.start();

        ConsumeQueuePersist consumeQueuePersist = new ConsumeQueuePersist(StorePathConfigHelper.getConsumeOffsetStorePath(this.messageStoreConfig.getStorePathRootDir()));
        consumeQueuePersist.load();
        this.consumeQueue = consumeQueuePersist.getConsumeQueue();
        consumeQueuePersist.start();

        this.messageStore = new DefaultMessageStore(this.messageStoreConfig);
        this.messageStore.start();
//        this.heartbeatCheck(); // disabled while debugging
    }

    // todo statistics service, admin console

    // After building a broker cluster with raft, brokers in the same group share
    // a broker name and differ by broker id; format: brokerName:brokerId
    /***
     * Broker group 1 --> broker_a: broker_a_1(master), broker_a_2(slave1), broker_a_3(slave2) — at least three nodes
     * Broker group 2 --> broker_b: broker_b_1(master), broker_b_2(slave1), broker_b_3(slave2) — at least three nodes
     *
     * The six broker servers form clusters via the raft algorithm and register in zk.
     *
     * Clients subscribe to zk, watch broker routing info in real time, and keep it
     * in an in-memory Map&lt;brokerName, List&lt;Broker&gt;&gt;.
     *
     * That map is the simplest routing table, but it will surely get richer, because
     * we need broker state (e.g. which node failed the last send, for fault
     * avoidance) and broker stats (memory, cpu, message volume, qps, tps) to decide
     * which node a send goes to, and so on.
     *
     * Sending: pick one master broker via a load-balancing strategy — round robin
     * by default, user-pluggable; fault avoidance and more complex logic come later.
     *
     * Consuming: with low traffic, subscribe to all masters directly; if one broker
     * group's master carries too much volume, switch to subscribing its slaves.
     *
     * Can messages be pushed actively?
     * Not easily with this design. Suppose a topic has two consumers c1 and c2:
     * the broker would have to track consumer membership and load-balance pushes
     * to c1 and c2. Consumers would need to report, on connect, which consumer
     * group they belong to and which topics they subscribe, and the broker would
     * have to maintain all that state — very cumbersome.
     * This is probably one reason RocketMQ's push mode is itself built on pull,
     * so a real-time push mode is not implemented here either.
     *
     * Zookeeper replaces a namesrv for route discovery:
     * brokers report brokerName, brokerId, ip, port and role;
     * clients subscribe to zookeeper, keep all broker info, and watch for updates.
     */

    /**
     * Schedules a periodic task that evicts consumers whose last heartbeat is
     * older than {@link #heartbeatTimeout} and hands their queues to the
     * surviving consumers of the same group.
     */
    private void heartbeatCheck() {
        scheduledExecutorService.scheduleAtFixedRate(() -> {
            try {
                evictExpiredConsumers();
            } catch (Exception e) {
                // An uncaught exception would silently cancel every future run
                // of a scheduleAtFixedRate task, killing heartbeat checking for
                // good — contain it here. TODO: route through a proper logger.
                e.printStackTrace();
            }
        }, 0, 1, TimeUnit.SECONDS);
    }

    /** Scans the heartbeat map once and removes every timed-out consumer. */
    private void evictExpiredConsumers() {
        Iterator<Map.Entry<String, Long>> it = heartBeatMap.entrySet().iterator();
        while (it.hasNext()) {
            Map.Entry<String, Long> entry = it.next();
            if (System.currentTimeMillis() - entry.getValue() <= heartbeatTimeout) {
                continue;
            }
            it.remove();

            // key layout: topic@group:consumer (see buildHeartbeatKey)
            String[] keyParts = entry.getKey().split(MessageConst.COLON);
            String topicGroup = keyParts[0];
            String consumer = keyParts[1];

            Map<String, List<Integer>> consumerMap = consumeQueue.get(topicGroup);
            if (consumerMap == null) {
                // group already dismantled; a NPE here would kill the timer
                continue;
            }
            List<Integer> removedQueues = consumerMap.remove(consumer);

            if (consumerMap.isEmpty()) {
                // Last consumer of the group is gone: drop the group entirely,
                // including its offsets on every queue of the topic.
                consumeQueue.remove(topicGroup);
                String[] topicAndGroup = topicGroup.split(MessageConst.AT);
                String topic = topicAndGroup[0];
                String group = topicAndGroup[1];
                List<MessageQueue> messageQueues = topicQueue.get(topic);
                if (messageQueues != null) {
                    for (MessageQueue messageQueue : messageQueues) {
                        messageQueue.removeOffsetByGroup(group);
                    }
                }
            } else if (removedQueues != null && !removedQueues.isEmpty()) {
                // BUGFIX: the original returned from the task when the removed
                // list was empty, aborting the scan and leaving other expired
                // consumers alive until a later tick.
                redistributeQueues(removedQueues, consumerMap);
            }
        }
    }

    /**
     * Spreads the queue indexes of an evicted consumer over the remaining
     * consumers of its group: an even share each, remainder to the first
     * consumer iterated.
     */
    private void redistributeQueues(List<Integer> removedQueues, Map<String, List<Integer>> consumerMap) {
        int share = removedQueues.size() / consumerMap.size();
        int remain = removedQueues.size() % consumerMap.size();
        int cursor = 0;
        boolean first = true;

        for (List<Integer> existing : consumerMap.values()) {
            if (cursor >= removedQueues.size()) {
                // BUGFIX: was "return", which aborted the caller's whole scan;
                // everything is distributed, so just stop this loop.
                break;
            }
            if (first && remain > 0) {
                existing.addAll(removedQueues.subList(cursor, cursor + remain));
                cursor += remain;
            }
            if (share > 0) {
                existing.addAll(removedQueues.subList(cursor, cursor + share));
                cursor += share;
            }
            first = false;
        }
    }


    /** Produces a message to a queue chosen by the configured selector. */
    public SendResult produce(Message msg) {
        return produce(msg, null);
    }

    /**
     * Produces a message, creating its topic on demand.
     *
     * @param msg     the message to store
     * @param queueId target queue index, or {@code null} to let the configured
     *                {@link MessageSelector} pick one
     * @return result carrying the generated msgId, the chosen queue and the
     *         send status (SEND_OK only when the store reports PUT_OK)
     */
    public SendResult produce(Message msg, Integer queueId) {
        String topic = msg.getTopic();
        createTopic(topic);

        List<MessageQueue> messageQueues = topicQueue.get(topic);
        MessageQueue select = (queueId == null)
                ? messageSelector.select(messageQueues, msg, null)
                : messageQueues.get(queueId);

        MessageExt messageExt = new MessageExt();
        messageExt.setTopic(topic);
        messageExt.setProperties(msg.getProperties());
        messageExt.setBody(msg.getBody());
        messageExt.setQueueId(select.getQueueId());
        messageExt.setQueueOffset(select.getProduceOffset());
        messageExt.setBrokerName(select.getBrokerName());
        String msgId = UUID.randomUUID().toString();
        messageExt.setMsgId(msgId);

        PutMessageResult putMessageResult = messageStore.putMessage(messageExt);

        SendResult sendResult = SendResult.builder()
                .msgId(msgId)
                .messageQueue(select)
                .sendStatus(SendStatus.SEND_FAIL)
                .build();

        if (putMessageResult.getPutMessageStatus() == PutMessageStatus.PUT_OK) {
            // advance the shared produce offset only after a successful store
            select.incrProduceOffset();
            sendResult.setSendStatus(SendStatus.SEND_OK);
        }
        return sendResult;
    }

    /**
     * Creates the topic with {@code brokerConfig.getTopicQueueNum()} queues if
     * it does not exist yet; double-checked under a per-topic segment lock.
     */
    public void createTopic(String topic) {
        if (topicQueue.get(topic) == null) {
            segmentLock.lock(topic);
            try {
                if (topicQueue.get(topic) == null) {
                    int topicQueueNum = brokerConfig.getTopicQueueNum();
                    List<MessageQueue> messageQueues = new ArrayList<>(topicQueueNum);
                    for (int i = 0; i < topicQueueNum; i++) {
                        messageQueues.add(new MessageQueue(topic, brokerConfig.getBrokerName(), i));
                    }
                    topicQueue.put(topic, messageQueues);
                }
            } finally {
                segmentLock.unlock(topic);
            }
        }
    }

    /** Key for {@link #heartBeatMap}: {@code topic@group:consumer}. */
    private String buildHeartbeatKey(String topic, String group, String consumer) {
        return topic + MessageConst.AT + group + MessageConst.COLON + consumer;
    }

    /** Key for {@link #consumeQueue}: {@code topic@group}. */
    private String buildKey(String topic, String group) {
        return topic + MessageConst.AT + group;
    }

    /**
     * Registers a brand-new consumer group on the topic: the group starts at
     * consume offset 0 on every queue, inherits the shared produce offset, and
     * the given consumer initially owns every queue index.
     */
    private void putAllQueueToGroup(String topic, String group, String consumer) {
        List<MessageQueue> messageQueues = topicQueue.get(topic);
        List<Integer> indexes = new ArrayList<>(messageQueues.size());

        for (int i = 0; i < messageQueues.size(); i++) {
            MessageQueue messageQueue = messageQueues.get(i);

            // consume offsets are per-group, never shared across groups
            messageQueue.getConsumeOffsetMap().put(group, new AtomicLong(0));
            messageQueue.getConsumeNoAckOffsetMap().put(group, new TreeSet<>());

            // the produce offset IS shared by all groups: reuse the existing
            // counter when one is present
            Map<String, AtomicLong> produceOffsetMap = messageQueue.getProduceOffsetMap();
            AtomicLong produceOffset = new AtomicLong(0);
            if (!produceOffsetMap.isEmpty()) {
                produceOffset = produceOffsetMap.values().iterator().next();
                // NOTE(review): removes the entry keyed by the static
                // MessageQueue.group — presumably the placeholder key of the
                // previous owner; confirm against MessageQueue.
                produceOffsetMap.remove(MessageQueue.group);
            }
            produceOffsetMap.put(group, produceOffset);

            indexes.add(i);
        }

        Map<String, List<Integer>> consumerQueues = new ConcurrentHashMap<>();
        consumerQueues.put(consumer, indexes);
        consumeQueue.put(buildKey(topic, group), consumerQueues);
    }

    /**
     * Ensures the topic exists and the consumer is registered in the group,
     * rebalancing the group's queue assignments when a new consumer joins.
     */
    public void createTopic(String topic, String group, String consumer) {
        createTopic(topic);
        String consumerKey = buildKey(topic, group);

        if (consumeQueue.get(consumerKey) == null) {
            segmentLock.lock(consumerKey);
            try {
                if (consumeQueue.get(consumerKey) == null) {
                    putAllQueueToGroup(topic, group, consumer);
                }
            } finally {
                segmentLock.unlock(consumerKey);
            }
        } else if (!consumeQueue.get(consumerKey).containsKey(consumer)) {
            segmentLock.lock(consumerKey);
            try {
                if (!consumeQueue.get(consumerKey).containsKey(consumer)) {
                    reBalance(consumerKey, consumer);
                }
            } finally {
                segmentLock.unlock(consumerKey);
            }
        }
    }

    /**
     * Re-divides all queue indexes of the group among the existing consumers
     * plus the joining one. When there are already as many consumers as queues
     * the newcomer gets nothing and stays unregistered.
     */
    private void reBalance(String consumerKey, String consumer) {
        Map<String, List<Integer>> consumerQueues = consumeQueue.get(consumerKey);

        if (consumerQueues.size() == brokerConfig.getTopicQueueNum()) {
            return;
        }
        int consumerNum = consumerQueues.size() + 1;

        List<Integer> allMessageQueue = new ArrayList<>();
        for (List<Integer> value : consumerQueues.values()) {
            allMessageQueue.addAll(value);
        }

        List<Integer> shares = partition(consumerNum, allMessageQueue.size());

        int i = 0;
        int offset = 0;
        int limit = shares.get(i);

        // snapshot the keys: values are overwritten while iterating
        for (String key : new ArrayList<>(consumerQueues.keySet())) {
            // BUGFIX: copy the slice — the original stored subList views backed
            // by the method-local list; mutating one assignment later (the
            // heartbeat redistribution does addAll) invalidates sibling views
            // with a ConcurrentModificationException.
            consumerQueues.put(key, new ArrayList<>(allMessageQueue.subList(offset, offset + limit)));
            i++;
            offset += limit;
            limit = shares.get(i);
        }
        consumerQueues.put(consumer, new ArrayList<>(allMessageQueue.subList(offset, offset + limit)));
    }

    /**
     * Splits {@code queueNum} queues as evenly as possible over
     * {@code consumerNum} consumers, larger shares first.
     * e.g. 3 consumers / 8 queues -> [3, 3, 2]; 3 / 4 -> [2, 1, 1]
     *
     * @param consumerNum number of consumers
     * @param queueNum    number of message queues
     * @return one share size per consumer, in descending order
     */
    private List<Integer> partition(Integer consumerNum, Integer queueNum) {
        List<Integer> shares = new ArrayList<>(consumerNum);

        int base = queueNum / consumerNum;
        int remain = queueNum % consumerNum;

        for (int i = consumerNum; i > 0; i--) {
            if (remain > 0) {
                shares.add(base + 1);
                remain--;
            } else {
                shares.add(base);
            }
        }

        return shares;
    }

    /** Pulls messages without auto-ack. */
    public List<MessageExt> consume(PullMessage pullMessage) {
        return consume(pullMessage, false);
    }

    /** Pulls messages using the parameters carried by the request object. */
    public List<MessageExt> consume(PullMessage pullMessage, boolean autoAck) {
        return consume(pullMessage.getTopic(), pullMessage.getGroup(), pullMessage.getConsumer(),
                pullMessage.getExpr(), autoAck);
    }

    /**
     * Pulls messages for a consumer: records its heartbeat, makes sure it is
     * registered, then reads its assigned queues concurrently, filtering out
     * delayed messages not yet due and messages not matching the tag
     * expression (both are parked in the group's no-ack set for redelivery).
     *
     * @return the matching messages, or {@code null} when topic/group is blank
     *         or the consumer currently owns no queue
     */
    @SneakyThrows
    public List<MessageExt> consume(String topic, String group, String consumer, String expr, boolean autoAck) {
        if (StringUtils.isEmpty(topic) || StringUtils.isEmpty(group)) {
            return null;
        }

        heartBeatMap.put(buildHeartbeatKey(topic, group, consumer), System.currentTimeMillis());

        createTopic(topic, group, consumer);

        List<Integer> queueIndexes = consumeQueue.get(buildKey(topic, group)).get(consumer);
        if (queueIndexes == null) {
            return null;
        }

        // read every assigned queue concurrently on the broker pool
        List<CompletableFuture<List<MessageExt>>> futures = new ArrayList<>(queueIndexes.size());
        for (Integer index : queueIndexes) {
            futures.add(CompletableFuture.supplyAsync(
                    () -> pullFromQueue(topic, group, expr, autoAck, index), exec));
        }

        List<MessageExt> messageExts = new ArrayList<>();
        for (CompletableFuture<List<MessageExt>> future : futures) {
            messageExts.addAll(future.get());
        }
        return messageExts;
    }

    /** Reads and filters the pending messages of one queue for the group. */
    private List<MessageExt> pullFromQueue(String topic, String group, String expr, boolean autoAck, int index) {
        MessageQueue messageQueue = topicQueue.get(topic).get(index);
        if (!messageQueue.hasMsg(group)) {
            // typed empty list instead of the raw Collections.EMPTY_LIST
            return Collections.emptyList();
        }
        List<MessageExt> messageExts = new ArrayList<>();
        for (Long consumeOffset : messageQueue.getConsumeOffset(group, autoAck)) {

            SelectMappedBufferResult commitLog =
                    messageStore.selectCommitLog(messageQueue.getTopic(), messageQueue.getQueueId(), consumeOffset);

            MessageExt decoded = MessageDecoder.decode(commitLog.getByteBuffer());

            // delayed message not due yet -> park in the no-ack set for a later pull
            if (this.delayMessageFilter.filter(decoded.getDelayTime())) {
                messageQueue.addNoAckConsumeOffset(group, consumeOffset);
                continue;
            }

            // tag expression mismatch -> park it as well
            if (this.tagMessageFilter.filter(expr, decoded.getTags(), decoded.getProperties())) {
                messageQueue.addNoAckConsumeOffset(group, consumeOffset);
                continue;
            }

            messageExts.add(decoded);
        }
        return messageExts;
    }

    /** Acks the offsets carried by the request object. */
    public void ack(AckMessage ackMessage) {
        ack(ackMessage.getTopic(), ackMessage.getGroup(), ackMessage.getQueueIndex(), ackMessage.getConsumeOffsets());
    }

    /**
     * Acknowledges consumed offsets by removing them from the group's no-ack
     * set on the given queue.
     */
    public void ack(String topic, String group, int queueIndex, List<Long> consumeOffsets) {
        MessageQueue messageQueue = topicQueue.get(topic).get(queueIndex);
        for (Long consumeOffset : consumeOffsets) {
            messageQueue.removeNoAckConsumeOffset(group, consumeOffset);
        }
    }

    /** Returns the publish info of the topic, creating the topic on demand. */
    public TopicPublishInfo queryTopic(String topic) {
        createTopic(topic);
        List<MessageQueue> messageQueues = topicQueue.get(topic);
        return new TopicPublishInfo(topic, messageQueues);
    }


    /**
     * Records this broker's role and registers
     * brokerName/brokerId/host/port/role in the registry (zk) under
     * {@code /brokerName/brokerId}.
     */
    public void tobeMaster(boolean isMaster) {
        this.master = isMaster;
        RegistryService registry = RegistryFactory.getRegistry(this.brokerConfig.getRegisterType(), this.brokerConfig.getRegisterAddr());
        String path = CommonConstants.SLASH + brokerConfig.getBrokerName() + CommonConstants.SLASH + brokerConfig.getBrokerId();

        BrokerInfo brokerInfo = new BrokerInfo(brokerConfig.getBrokerName(), brokerConfig.getBrokerId(), brokerConfig.getHost(), brokerConfig.getPort(), this.master);
        registry.register(path, JSONObject.toJSONString(brokerInfo));
    }
}
