package org.example.frw.canal.listener.mq.kafka;

import cn.hutool.core.collection.CollUtil;
import cn.hutool.core.map.MapUtil;
import cn.hutool.core.text.CharSequenceUtil;
import cn.hutool.core.util.HashUtil;
import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.example.frw.canal.EntityComposite;
import org.example.frw.canal.HashValue;
import org.example.frw.canal.binlog.BaseCanalBinlog;
import org.example.frw.canal.config.TopicConcurrentlySetting;
import org.springframework.kafka.support.Acknowledgment;

import java.time.Duration;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.concurrent.*;
import java.util.function.Consumer;

@Slf4j
public abstract class BaseConsumer<T extends HashValue> {


    protected static final String BATCH_CONTAINER_FACTORY = "canalBatchContainerFactory";

    /** Immutable map: partition index -> its single-threaded bounded dispatcher queue. */
    protected final Map<Integer, DispatcherQueue> queueMap;
    /** Number of parallel dispatcher queues (per-topic consumption concurrency, default 4). */
    protected final Integer concurrentlyLevel;
    protected final String topic;
    protected final String group;
    /** Single worker that runs the per-batch completion check off the listener thread. */
    private final ExecutorService executorService = Executors.newSingleThreadExecutor();

    /**
     * Creates the consumer and one bounded dispatcher queue per concurrency slot.
     *
     * @param topicConcurrentlySetting supplies the per-topic concurrency level
     * @param topic topic being consumed
     * @param group consumer group
     */
    public BaseConsumer(TopicConcurrentlySetting topicConcurrentlySetting, String topic, String group) {
        log.info("BaseConsumer initial. topic={}", topic);
        this.group = group;
        this.topic = topic;
        // Resolve the consumption concurrency for this topic (default is 4).
        concurrentlyLevel = topicConcurrentlySetting.getTopicConcurrentlyLevel(topic);
        log.info("concurrentlyLevel={}", concurrentlyLevel);
        // Create one bounded blocking queue per concurrency slot.
        Map<Integer, DispatcherQueue> queueMapTmp = new HashMap<>(concurrentlyLevel);
        for (int i = 0; i < concurrentlyLevel; i++) {
            queueMapTmp.put(i, new DispatcherQueue(this::doProcess, i));
        }
        queueMap = MapUtil.unmodifiable(queueMapTmp);
        log.info("queue initial done. {}", queueMap.keySet());
    }

    /** Processes one flattened entity; called on the owning partition's worker thread. */
    protected abstract void doProcess(EntityComposite<? extends HashValue> messageExt);

    /** Flattens one Kafka record into entities, each tagged with a partition in [0, concurrentlyLevel). */
    protected abstract List<EntityComposite<T>> hashIndex(int concurrentlyLevel, ConsumerRecord<String, String> consumerRecord);

    /**
     * Backpressure: delay re-consumption by 10 seconds instead of committing.
     * <p>
     * NOTE(review): per Spring Kafka semantics, {@code nack(index, sleep)} commits records
     * BEFORE {@code index} and redelivers from {@code index} on — so {@code size - 1} commits
     * all but the last record, which contradicts the "whole batch uncommitted" intent stated
     * at the call sites. If the full batch must be retried, this should be {@code nack(0, ...)}
     * — confirm the intended offset semantics before changing.
     *
     * @param size size
     * @param acknowledgment acknowledgment
     */
    private static void nackSleep(Integer size, Acknowledgment acknowledgment) {
        acknowledgment.nack(size - 1, Duration.ofSeconds(10));
    }

    /**
     * Dispatches a batch of records to the partition queues, then waits (bounded) for the
     * whole batch to be processed before acknowledging. On a full queue, a timeout, or an
     * error, the batch is nack'd with a 10-second backoff; duplicates are possible on retry,
     * so downstream processing must be idempotent.
     *
     * @param messages batch of Kafka records
     * @param acknowledgment manual ack handle for the batch
     */
    public void consumeMessage(List<ConsumerRecord<String, String>> messages, Acknowledgment acknowledgment) {
        log.info("收到消息！size={}, topic={}", messages.size(), topic);
        // partition index -> number of entities dispatched to that partition in this batch
        Map<Integer, Integer> dispatchedCounts = new HashMap<>();
        for (ConsumerRecord<String, String> message : messages) {
            // Flatten the record's data field into individual entities, then dispatch.
            List<EntityComposite<T>> entityComposites = hashIndex(concurrentlyLevel, message);
            if (CollUtil.isEmpty(entityComposites)) {
                continue;
            }
            for (EntityComposite<T> composite : entityComposites) {
                int index = this.offer(composite);
                if (index < 0) {
                    // -index - 1 decodes the partition whose queue is full (see offer()).
                    log.error("队列已满，暂时无法消费，挂起当前队列10秒 {}", -index - 1);
                    // Queue full means we are overloaded: don't commit this batch. Some
                    // already-offered data may be redelivered, so processing must be idempotent.
                    nackSleep(messages.size(), acknowledgment);
                    return;
                }
                dispatchedCounts.merge(index, 1, Integer::sum);
            }
        }
        // Everything is queued; fast partitions may already be done, but the batch only
        // counts as successful once every partition has drained its share.
        Future<Boolean> future = executorService.submit(() -> {
            try {
                return dispatchedCounts.entrySet().stream()
                        .allMatch(entry -> this.done(entry.getKey(), entry.getValue()));
            } catch (Exception e) {
                log.error("作业检查失败", e);
                return false;
            }
        });
        try {
            // Bounded wait: if the check does not finish in time, something most likely
            // failed downstream (done() blocked too long waiting for permits).
            Boolean success = future.get(10L, TimeUnit.SECONDS);
            if (Boolean.TRUE.equals(success)) {
                // Only acknowledge when the whole batch has been processed.
                log.info("消费成功！size={}", messages.size());
                acknowledgment.acknowledge();
                return;
            } else {
                log.error("超时仍未处理完毕！！！挂起当前队列10秒");
            }
        } catch (Exception e) {
            log.error("消费异常，挂起当前队列10秒", e);
        } finally {
            // Cancel a stale check so it cannot occupy the single-thread executor and
            // block the completion check of the next batch.
            future.cancel(true);
            this.reset();
        }
        // Backpressure
        nackSleep(messages.size(), acknowledgment);
    }

    /**
     * Routes the composite to the queue of its pre-computed partition.
     *
     * @return the partition index ({@code >= 0}) on success, or {@code -partition - 1}
     *         ({@code < 0}) when the target queue is full. The encoding includes the
     *         {@code - 1} offset because plain negation cannot signal failure for
     *         partition 0 ({@code -0 == 0}).
     */
    public int offer(EntityComposite<T> entityComposite) {
        int index = entityComposite.getPartition();
        boolean offered = queueMap.get(index).offer(entityComposite);
        return offered ? index : -index - 1;
    }

    /**
     * Forcibly clears all completion permits, discarding any stale signal from an
     * aborted or timed-out batch.
     */
    public void reset() {
        queueMap.values().forEach(DispatcherQueue::reset);
    }

    /**
     * Waits (bounded) until {@code size} entities of the given partition have been processed.
     *
     * @param index partition
     * @param size size
     * @return {@code true} if the partition finished within the wait window
     */
    public boolean done(int index, int size) {
        return queueMap.get(index).done(size);
    }

    /**
     * One partition's bounded queue plus its dedicated worker thread. A semaphore permit is
     * released per successfully processed entity, letting the batch-level check count
     * completions.
     */
    public static class DispatcherQueue {
        private final BlockingQueue<EntityComposite<? extends HashValue>> queue;
        private final ExecutorService executorService;
        private volatile boolean exitFlag = false;
        private final Semaphore semaphore = new Semaphore(0);
        private final Integer index;

        public DispatcherQueue(Consumer<EntityComposite<? extends HashValue>> consumer, int index) {
            this.index = index;
            queue = new ArrayBlockingQueue<>(500);
            executorService = Executors.newSingleThreadExecutor();
            executorService.submit(() -> {
                while (!exitFlag) {
                    EntityComposite<? extends HashValue> entityComposite = null;
                    try {
                        // Blocks while the queue is empty.
                        entityComposite = queue.take();
                    } catch (InterruptedException e) {
                        log.error("queue take error", e);
                        // Re-assert the interrupt (typically raised by shutdownNow()) and
                        // loop around so exitFlag is re-checked.
                        Thread.currentThread().interrupt();
                    }

                    if (entityComposite != null) {
                        try {
                            consumer.accept(entityComposite);
                            // Release a permit only on success.
                            semaphore.release();
                        } catch (Exception e) {
                            // On failure the permit is withheld, so the batch-level done()
                            // check comes up one short and blocks until its timeout.
                            log.error("Error processing message: message={}", entityComposite, e);
                        }
                    } else {
                        log.error("skipped！messageExt = null");
                    }
                }
            });
            // Ensure the worker thread is torn down on JVM exit.
            Runtime.getRuntime().addShutdownHook(new Thread(this::shutdown));
        }

        /** Discards any leftover completion permits from a previous batch. */
        public void reset() {
            semaphore.drainPermits();
        }

        /**
         * Offers an entity, waiting up to 5 seconds for capacity.
         *
         * @return {@code true} if enqueued, {@code false} on timeout or interrupt
         */
        public boolean offer(EntityComposite<? extends HashValue> entityComposite) {
            try {
                return queue.offer(entityComposite, 5L, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                log.error("DispatcherQueue put error", e);
                Thread.currentThread().interrupt();
            }
            return false;
        }

        /**
         * Blocks until {@code size} entities have been processed, or the 10-second
         * wait window elapses.
         *
         * @param size number of completions to wait for
         * @return {@code true} if all permits were acquired in time
         */
        public boolean done(int size) {
            try {
                // tryAcquire is all-or-nothing: it does not consume permits on timeout.
                return semaphore.tryAcquire(size, 10L, TimeUnit.SECONDS);
            } catch (InterruptedException e) {
                log.error("DispatcherQueue done error. size={}", size, e);
                Thread.currentThread().interrupt();
                throw new RuntimeException("DispatcherQueue done error", e);
            }
        }

        /** Stops the worker; safe to call from the shutdown hook or application teardown. */
        public void shutdown() {
            exitFlag = true;
            // shutdownNow() interrupts the worker so a blocking take() wakes up and
            // observes exitFlag; plain shutdown() would leave it blocked forever.
            executorService.shutdownNow();
            log.info("DispatcherQueue {} shutdown successfully", index);
        }
    }

    /**
     * Flattens a Canal binlog event into per-row composites, each assigned a partition via
     * an additive hash of the entity's hash value. DDL events yield an empty list.
     * <p>
     * NOTE(review): for UPDATE events this pairs {@code data.get(i)} with {@code old.get(i)}
     * — assumes the two lists are parallel and equal-sized; confirm against the binlog model.
     *
     * @param concurrentlyLevel number of partitions to hash into
     * @param bean decoded binlog event
     * @return one composite per changed row (never {@code null})
     */
    protected static <E extends HashValue> List<EntityComposite<E>> getEntityComposites(int concurrentlyLevel, BaseCanalBinlog<E> bean) {
        if (bean.isDdl()) {
            log.info("skipped！！！ ddl not supported");
            return new ArrayList<>();
        }
        String type = bean.getType();
        boolean isUpdate = CharSequenceUtil.equals(type, "UPDATE");
        List<EntityComposite<E>> composites = new ArrayList<>(bean.getData().size());
        for (int i = 0; i < bean.getData().size(); i++) {
            EntityComposite<E> entityComposite = new EntityComposite<>();
            E entity = bean.getData().get(i);
            if (isUpdate) {
                // UPDATE events carry the pre-image row at the same position.
                entityComposite.setOldEntity(bean.getOld().get(i));
            }
            entityComposite.setPartition(HashUtil.additiveHash(entity.hashValue(), concurrentlyLevel));
            entityComposite.setEntity(entity);
            entityComposite.setType(type);
            entityComposite.setDdl(bean.isDdl());
            entityComposite.setDatabase(bean.getDatabase());
            entityComposite.setTable(bean.getTable());
            composites.add(entityComposite);
        }
        return composites;
    }

}