package io.kiki.sba.registry.server.data_store.change;

import com.google.common.collect.Lists;
import io.kiki.sba.registry.api.Channel;
import io.kiki.sba.registry.api.Server;
import io.kiki.sba.registry.api.exchange.DataExchanger;
import io.kiki.sba.registry.common.model.Node.NodeType;
import io.kiki.sba.registry.common.model.TraceTimes;
import io.kiki.sba.registry.common.model.Tuple;
import io.kiki.sba.registry.common.model.dataserver.Datum;
import io.kiki.sba.registry.common.model.dataserver.DatumVersion;
import io.kiki.sba.registry.common.model.sessionserver.DataChangeRequest;
import io.kiki.sba.registry.common.model.sessionserver.DataPushRequest;
import io.kiki.sba.registry.common.model.store.Publisher;
import io.kiki.sba.registry.common.model.store.SubDatum;
import io.kiki.sba.registry.server.data_store.bootstrap.MultiClusterDataServerConfig;
import io.kiki.sba.registry.server.data_store.bootstrap.ServerConfig;
import io.kiki.sba.registry.server.data_store.cache.DatumStorageDelegate;
import io.kiki.sba.registry.server.shared.util.DatumUtils;
import io.kiki.sba.registry.store.api.config.DefaultCommonConfig;
import io.kiki.sba.registry.task.FastRejectedExecutionException;
import io.kiki.sba.registry.task.KeyedThreadPoolExecutor;
import io.kiki.sba.registry.util.CollectionUtils;
import io.kiki.sba.registry.util.ConcurrentUtils;
import io.kiki.sba.registry.util.LoopExecuteTask;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;

import java.util.*;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;

import static io.kiki.sba.registry.server.data_store.change.ChangeMetrics.*;


/**
 * Central hub that collects datum change events and asynchronously notifies connected
 * session servers (and remote data servers) about them.
 *
 * <p>Two independent change streams are maintained:
 * <ul>
 *   <li>{@link #dataCenter2Changes} — normal changes, merged per data center by
 *       {@link DataChangeMerger} and drained by the {@link ChangeMerger} loop task;</li>
 *   <li>{@link #dataCenter2TempChanges} — temporary publisher changes, merged per
 *       dataInfoId and drained by the {@link TempChangeMerger} loop task.</li>
 * </ul>
 *
 * <p>Locking idiom (intentionally "inverted"): the many concurrent <em>appenders</em>
 * ({@link #onChange}, {@link #onTempPubChange}) take the <b>read</b> lock so they can
 * proceed in parallel, while the single <em>drainer</em> ({@link #transferChangeEvent},
 * {@link #handleTempChanges}) takes the <b>write</b> lock to get an exclusive
 * snapshot-and-clear. Failed notifications are re-queued with a backoff in the bounded
 * {@link #retryNotifiers} list and re-committed by {@link #handleExpire()}.
 */
public class DataChangeEventCenter {
    private static final Logger logger = LoggerFactory.getLogger(DataChangeEventCenter.class);
    // dataCenter -> merger accumulating changed dataInfoIds; guarded by "lock" (see class doc)
    private final Map<String, DataChangeMerger> dataCenter2Changes = new ConcurrentHashMap<>();
    private final ReadWriteLock lock = new ReentrantReadWriteLock();
    // bounded FIFO of notifiers awaiting a retry; guarded by synchronized(retryNotifiers)
    private final LinkedList<ChangeNotifierRetry> retryNotifiers = new LinkedList<>();
    // dataCenter -> (dataInfoId -> merged temp Datum); guarded by "tempLock"
    private final Map<String, Map<String, Datum>> dataCenter2TempChanges = new ConcurrentHashMap<>();
    private final ReadWriteLock tempLock = new ReentrantReadWriteLock();
    private final TempChangeMerger tempChangeMerger = new TempChangeMerger();
    private final ChangeMerger changeMerger = new ChangeMerger();
    @Autowired
    private ServerConfig serverConfig;
    @Autowired
    private MultiClusterDataServerConfig multiClusterDataServerConfig;
    @Autowired
    private DatumStorageDelegate datumStorageDelegate;
    @Autowired
    private DataExchanger boltDataExchanger;
    @Autowired
    private DefaultCommonConfig defaultCommonConfig;
    private KeyedThreadPoolExecutor notifyExecutor;
    private KeyedThreadPoolExecutor notifyTempExecutor;

    /**
     * Creates the keyed notify executors and starts the two daemon merger loops.
     * Must be called once before any change events are accepted.
     */
    public void init() {
        this.notifyExecutor = new KeyedThreadPoolExecutor("notify", serverConfig.getNotifyExecutorPoolSize(), serverConfig.getNotifyExecutorQueueSize());
        this.notifyTempExecutor = new KeyedThreadPoolExecutor("notifyTemp", serverConfig.getNotifyTempExecutorPoolSize(), serverConfig.getNotifyTempExecutorQueueSize());

        ConcurrentUtils.createDaemonThread("changeMerger", changeMerger).start();
        ConcurrentUtils.createDaemonThread("tempChangeMerger", tempChangeMerger).start();
        logger.info("start DataChange NotifyIntervalMs={}, NotifyTempIntervalMs={}", serverConfig.getNotifyIntervalMillis(), serverConfig.getNotifyTempDataIntervalMillis());
    }

    /**
     * Records a temporary publisher change, merging it into the per-dataCenter,
     * per-dataInfoId temp {@link Datum}. Takes the read lock so concurrent
     * appenders do not block each other; the drainer holds the write lock.
     *
     * @param publisher  the temp publisher that changed
     * @param dataCenter the data center the change belongs to
     */
    public void onTempPubChange(Publisher publisher, String dataCenter) {
        Map<String, Datum> changes = dataCenter2TempChanges.computeIfAbsent(dataCenter, k -> new ConcurrentHashMap<>());
        tempLock.readLock().lock();
        try {
            Datum existing = changes.computeIfAbsent(publisher.getDataInfoId(), k -> new Datum(publisher, dataCenter));
            existing.addPublisher(publisher);
        } finally {
            tempLock.readLock().unlock();
        }
    }

    /**
     * Records a batch of changed dataInfoIds for a data center. No-op on an empty
     * collection. Uses the read lock for the same append-in-parallel idiom as
     * {@link #onTempPubChange}.
     *
     * @param dataInfoIds    ids whose datum changed; may be empty
     * @param dataChangeType the kind of change being merged
     * @param dataCenter     the data center the change belongs to
     */
    public void onChange(Collection<String> dataInfoIds, DataChangeType dataChangeType, String dataCenter) {
        if (dataInfoIds.isEmpty()) {
            return;
        }
        DataChangeMerger dataChangeMerger = dataCenter2Changes.computeIfAbsent(dataCenter, k -> new DataChangeMerger());
        lock.readLock().lock();
        try {
            dataChangeMerger.addChanges(dataInfoIds, dataChangeType);
        } finally {
            lock.readLock().unlock();
        }
    }

    /**
     * Schedules a retry for a failed notifier, up to the configured retry limit;
     * past the limit the notification is dropped and counted as skipped.
     */
    private void retry(ChangeNotifier changeNotifier) {
        changeNotifier.retryCount++;
        if (changeNotifier.retryCount <= serverConfig.getNotifyRetryTimes()) {
            if (commitRetry(changeNotifier)) {
                CHANGE_RETRY_COUNTER.inc();
                return;
            }
        }
        CHANGE_SKIP_COUNTER.inc();
        logger.warn("skip retry of full, {}", changeNotifier);
    }

    /**
     * Enqueues the notifier with a backoff expiry. The queue is bounded: when full,
     * the oldest entry is evicted to make room (newest retries win). Always returns
     * {@code true} because eviction guarantees admission.
     */
    boolean commitRetry(ChangeNotifier changeNotifier) {
        final int maxSize = serverConfig.getNotifyRetryQueueSize();
        final long expireTimestamp = System.currentTimeMillis() + serverConfig.getNotifyRetryBackoffMillis();
        synchronized (retryNotifiers) {
            if (retryNotifiers.size() >= maxSize) {
                // queue full: evict the oldest pending retry
                retryNotifiers.removeFirst();
            }
            retryNotifiers.add(new ChangeNotifierRetry(changeNotifier, expireTimestamp));
        }
        return true;
    }

    /**
     * Removes and returns all retry entries whose backoff has expired
     * (expireTimestamp &lt;= now). Entries still backing off stay queued.
     */
    List<ChangeNotifier> getExpires() {
        final List<ChangeNotifier> list = new LinkedList<>();
        final long now = System.currentTimeMillis();
        synchronized (retryNotifiers) {
            final Iterator<ChangeNotifierRetry> iterator = retryNotifiers.iterator();
            while (iterator.hasNext()) {
                ChangeNotifierRetry changeNotifierRetry = iterator.next();
                if (changeNotifierRetry.expireTimestamp <= now) {
                    list.add(changeNotifierRetry.notifier);
                    iterator.remove();
                }
            }
        }
        return list;
    }

    /**
     * Pushes a merged temp datum to one session channel. The datum version is
     * bumped via the datum store first; if this node no longer owns the
     * dataInfoId (null version) the push is skipped. Existing persisted
     * publishers are merged in so the pushed datum is complete.
     */
    private void notifyTempPub(Channel channel, Datum datum) {
        // has temp pub, need to update the datum.version, we use the cache.datum.version as
        // push.version
        final DatumVersion datumVersion = datumStorageDelegate.updateVersion(datum.getDataCenter(), datum.getDataInfoId());
        if (datumVersion == null) {
            logger.warn("not owns the DataInfoId when temp pub to {},{}", channel, datum.getDataInfoId());
            return;
        }
        Datum existDatum = datumStorageDelegate.get(datum.getDataCenter(), datum.getDataInfoId());
        if (existDatum != null) {
            datum.addPublishers(existDatum.getRegisterIdToPublisherMap());
        }
        datum.setVersion(datumVersion.getValue());
        SubDatum subDatum = DatumUtils.of(datum);
        DataPushRequest request = new DataPushRequest(subDatum);
        logger.info("temp pub to {}, {}", channel, subDatum);
        doNotify(request, channel, serverConfig.getNotifyPort());
    }

    /** Sends the request synchronously over the server bound to {@code notifyPort}. */
    private void doNotify(Object request, Channel channel, int notifyPort) {
        Server server = boltDataExchanger.getServer(notifyPort);
        server.sendSync(channel, request, serverConfig.getRpcTimeoutMillis());
    }

    /**
     * Drains all pending temp changes (under the write lock, clearing them first)
     * and fans each datum out to every given channel via the temp notify executor.
     *
     * <p>NOTE: the changes are cleared before the empty-channel check, so pending
     * temp changes are dropped when no session connection exists.
     *
     * @return {@code true} if at least one datum was submitted for notification
     */
    boolean handleTempChanges(List<Channel> channelList) {
        // first clean the event
        List<Datum> datumList = new ArrayList<>();
        tempLock.writeLock().lock();
        try {
            for (Map<String, Datum> change : dataCenter2TempChanges.values()) {
                datumList.addAll(change.values());
                change.clear();
            }
        } finally {
            tempLock.writeLock().unlock();
        }
        if (datumList.isEmpty()) {
            return false;
        }
        if (channelList.isEmpty()) {
            logger.warn("session conn is empty when temp change");
            return false;
        }
        for (Datum datum : datumList) {
            for (Channel channel : channelList) {
                try {
                    // group by connect && dataInfoId
                    notifyTempExecutor.execute(Tuple.of(datum.getDataInfoId(), channel.getRemoteAddress()), new TempNotifier(channel, datum));
                    CHANGETEMP_COMMIT_COUNTER.inc();
                } catch (FastRejectedExecutionException e) {
                    // executor queue full — count and drop, don't block the merger loop
                    CHANGETEMP_SKIP_COUNTER.inc();
                    logger.warn("commit notify temp full, {}, {}, {}", channel, datum, e.getMessage());
                } catch (Throwable e) {
                    CHANGETEMP_SKIP_COUNTER.inc();
                    logger.error("commit notify temp failed, {}, {}", channel, datum, e);
                }
            }
        }
        return true;
    }

    /**
     * Notifies the given node type about a batch of change events. For each event,
     * resolves current datum versions and submits one {@link ChangeNotifier} per
     * target host (a random channel per host) on the notify executor.
     *
     * <p>When {@code nodeType} is {@code data_store} (remote-cluster sync), only
     * changes originating from the local data center are forwarded, and the
     * advertised dataCenter is rewritten to the default cluster id.
     *
     * @param dataChangeEventList   events to process; empty/null is a no-op
     * @param nodeType              target node type (session vs remote data)
     * @param notifyPort            port whose server/channels are used
     * @param errorWhenChannelEmpty whether an empty channel map is logged as error
     * @return {@code true} if the event list was processed (even if some commits failed)
     */
    boolean handleChanges(List<DataChangeEvent> dataChangeEventList, NodeType nodeType, int notifyPort, boolean errorWhenChannelEmpty) {

        if (org.springframework.util.CollectionUtils.isEmpty(dataChangeEventList)) {
            return false;
        }

        Server server = boltDataExchanger.getServer(notifyPort);
        Map<String, List<Channel>> channelsMap = server.selectAllAvailableChannelsForHostAddress();

        if (channelsMap.isEmpty()) {
            if (errorWhenChannelEmpty) {
                logger.error("{} conn is empty when change", nodeType);
            }
            return false;
        }
        for (DataChangeEvent dataChangeEvent : dataChangeEventList) {
            String dataCenter = dataChangeEvent.getDataCenter();
            if (nodeType == NodeType.data_store) {
                if (serverConfig.isLocalDataCenter(dataCenter)) {
                    // remote sync advertises the default cluster id instead of the local name
                    dataCenter = defaultCommonConfig.getDefaultClusterId();
                    logger.info("[Notify]dataCenter={}, dataInfoIds={} notify local dataChange to remote.", dataCenter, dataChangeEvent.getDataInfoIds());
                } else {
                    logger.info("[skip]dataCenter={}, dataInfoIds={} change skip to notify remote data.", dataCenter, dataChangeEvent.getDataInfoIds());
                    continue;
                }
            }

            // versions are looked up with the event's ORIGINAL dataCenter, not the rewritten one
            final Map<String, DatumVersion> stringDatumVersionMap = new HashMap<>(dataChangeEvent.getDataInfoIds().size());
            for (String dataInfoId : dataChangeEvent.getDataInfoIds()) {
                DatumVersion datumVersion = datumStorageDelegate.getVersion(dataChangeEvent.getDataCenter(), dataInfoId);
                if (datumVersion != null) {
                    stringDatumVersionMap.put(dataInfoId, datumVersion);
                }
            }
            if (stringDatumVersionMap.isEmpty()) {
                continue;
            }
            for (Map.Entry<String, DatumVersion> entry : stringDatumVersionMap.entrySet()) {
                logger.info("datum change notify: {},{}", entry.getKey(), entry.getValue());
            }
            for (Map.Entry<String, List<Channel>> entry : channelsMap.entrySet()) {
                // one random channel per remote host address
                Channel channel = CollectionUtils.getRandom(entry.getValue());
                try {
                    notifyExecutor.execute(channel.getRemoteAddress(), new ChangeNotifier(channel, notifyPort, dataCenter, stringDatumVersionMap, dataChangeEvent.getTraceTimes()));
                    CHANGE_COMMIT_COUNTER.inc();
                } catch (FastRejectedExecutionException e) {
                    CHANGE_SKIP_COUNTER.inc();
                    logger.warn("commit notify full, {}, {}, {}", channel, stringDatumVersionMap.size(), e.getMessage());
                } catch (Throwable e) {
                    CHANGE_SKIP_COUNTER.inc();
                    logger.error("commit notify failed, {}, {}", channel, stringDatumVersionMap.size(), e);
                }
            }
        }
        return true;
    }

    /**
     * Re-submits every retry whose backoff has expired. Notifiers rejected again
     * will re-enter the retry queue through their own failure path.
     */
    void handleExpire() {
        final List<ChangeNotifier> changeNotifierList = getExpires();
        // commit retry
        for (ChangeNotifier changeNotifier : changeNotifierList) {
            try {
                notifyExecutor.execute(changeNotifier.channel.getRemoteAddress(), changeNotifier);
                CHANGE_COMMIT_COUNTER.inc();
            } catch (FastRejectedExecutionException e) {
                CHANGE_SKIP_COUNTER.inc();
                logger.warn("commit retry notify full, {}, {}, {}", changeNotifier.channel, changeNotifier.dataInfoIds.size(), e.getMessage());
            } catch (Throwable e) {
                CHANGE_SKIP_COUNTER.inc();
                logger.error("commit retry notify failed, {}, {}", changeNotifier.channel, changeNotifier.dataInfoIds.size(), e);
            }
        }
    }

    /**
     * Atomically drains all accumulated changes (under the write lock) into a list
     * of {@link DataChangeEvent}s, splitting each data center's ids into chunks of
     * at most {@code maxItems}.
     *
     * @param maxItems maximum number of dataInfoIds per emitted event
     * @return the drained events; never null
     */
    List<DataChangeEvent> transferChangeEvent(int maxItems) {
        final List<DataChangeEvent> dataChangeEventList = new ArrayList<>();
        lock.writeLock().lock();
        try {
            for (Map.Entry<String, DataChangeMerger> entry : dataCenter2Changes.entrySet()) {
                final String dataCenter = entry.getKey();
                DataChangeMerger dataChangeMerger = entry.getValue();
                TraceTimes traceTimes = dataChangeMerger.createTraceTime();
                List<List<String>> parts = Lists.partition(new ArrayList<>(dataChangeMerger.getDataInfoIds()), maxItems);
                for (List<String> part : parts) {
                    dataChangeEventList.add(new DataChangeEvent(dataCenter, part, traceTimes));
                }
                dataChangeMerger.clear();
            }
        } finally {
            lock.writeLock().unlock();
        }
        return dataChangeEventList;
    }


    /** Returns a snapshot of the pending changed dataInfoIds for a data center (for inspection). */
    Set<String> getOnChanges(String dataCenter) {
        DataChangeMerger changes = dataCenter2Changes.get(dataCenter);
        return changes == null ? Collections.emptySet() : new HashSet<>(changes.getDataInfoIds());
    }


    /** Returns a snapshot of the pending temp changes for a data center (for inspection). */
    Map<String, Datum> getOnTempPubChanges(String dataCenter) {
        Map<String, Datum> changes = dataCenter2TempChanges.get(dataCenter);
        return changes == null ? Collections.emptyMap() : new HashMap<>(changes);
    }


    /** Task that pushes one temp datum to one channel; failures are counted, never retried. */
    final class TempNotifier implements Runnable {
        final Channel channel;
        final Datum datum;

        TempNotifier(Channel channel, Datum datum) {
            this.channel = channel;
            this.datum = datum;
        }

        @Override
        public void run() {
            try {
                if (!channel.isConnected()) {
                    CHANGETEMP_FAIL_COUNTER.inc();
                    logger.info("temp change notify failed, conn is closed, {}", channel);
                    return;
                }
                notifyTempPub(channel, datum);
                CHANGETEMP_SUCCESS_COUNTER.inc();
            } catch (Throwable e) {
                CHANGETEMP_FAIL_COUNTER.inc();
                logger.error("failed to notify temp {}, {}", channel, datum, e);
            }
        }
    }

    /**
     * Retry-queue entry pairing a notifier with its backoff expiry.
     * Static: it uses no state of the enclosing instance, so it must not
     * carry a hidden reference to it.
     */
    private static final class ChangeNotifierRetry {
        final ChangeNotifier notifier;
        final long expireTimestamp;

        ChangeNotifierRetry(ChangeNotifier notifier, long expireTimestamp) {
            this.notifier = notifier;
            this.expireTimestamp = expireTimestamp;
        }
    }

    /**
     * Task that sends a {@link DataChangeRequest} (dataInfoId -> version map) to one
     * channel. On any failure it re-enters the bounded retry queue via
     * {@link DataChangeEventCenter#retry}.
     */
    final class ChangeNotifier implements Runnable {
        final Channel channel;
        final int notifyPort;
        final String dataCenter;
        final Map<String, DatumVersion> dataInfoIds;
        final TraceTimes times;

        volatile int retryCount;

        private ChangeNotifier(Channel channel, int notifyPort, String dataCenter, Map<String, DatumVersion> dataInfoIds, TraceTimes parentTimes) {
            this.dataCenter = dataCenter;
            this.channel = channel;
            this.notifyPort = notifyPort;
            this.dataInfoIds = dataInfoIds;
            // copy trace times so this notifier owns its own timeline
            this.times = parentTimes.copy();
            this.times.setDatumNotifyCreate(System.currentTimeMillis());
        }

        @Override
        public void run() {
            try {
                if (!channel.isConnected()) {
                    CHANGE_FAIL_COUNTER.inc();
                    logger.info("change notify failed, conn is closed, {}", channel);
                    return;
                }
                DataChangeRequest request = new DataChangeRequest(dataCenter, dataInfoIds, times);
                request.getTimes().setDatumNotifySend(System.currentTimeMillis());
                doNotify(request, channel, notifyPort);
                logger.info("success to notify {}, {}", channel.getRemoteAddress(), this);
                CHANGE_SUCCESS_COUNTER.inc();
            } catch (Throwable e) {
                CHANGE_FAIL_COUNTER.inc();
                logger.error("failed to notify {}, {}", channel, this, e);
                retry(this);
            }
        }

        /** Sums the lengths of all dataInfoId keys (approximate payload size). */
        int size() {
            int size = 0;
            // loop variable renamed: the original shadowed the "dataInfoIds" field
            for (String dataInfoId : dataInfoIds.keySet()) {
                size += dataInfoId.length();
            }
            return size;
        }


    }

    /** Loop task draining temp changes to all available session channels every notifyTemp interval. */
    private final class TempChangeMerger extends LoopExecuteTask {

        @Override
        public void _execute_() {
            try {
                Server server = boltDataExchanger.getServer(serverConfig.getNotifyPort());
                Map<String, Channel> channelMap = server.selectAvailableChannelsForHostAddress();
                handleTempChanges(new ArrayList<>(channelMap.values()));
            } catch (Throwable e) {
                logger.error("failed to merge temp change", e);
            }
        }

        @Override
        public void _wait_() {
            ConcurrentUtils.sleepUninterruptibly(serverConfig.getNotifyTempDataIntervalMillis(), TimeUnit.MILLISECONDS);
        }
    }

    /** Loop task draining normal changes to local sessions and remote data, then re-committing expired retries. */
    private final class ChangeMerger extends LoopExecuteTask {

        @Override
        public void _execute_() {
            try {
                // first clean the event
                final int maxItems = serverConfig.getNotifyMaxItems();
                final List<DataChangeEvent> events = transferChangeEvent(maxItems);

                // notify local session
                handleChanges(events, NodeType.client_interface, serverConfig.getNotifyPort(), true);

                // notify remote data
                handleChanges(events, NodeType.data_store, multiClusterDataServerConfig.getSyncRemoteSlotLeaderPort(), false);
                handleExpire();
            } catch (Throwable e) {
                logger.error("failed to merge change", e);
            }
        }

        @Override
        public void _wait_() {
            ConcurrentUtils.sleepUninterruptibly(serverConfig.getNotifyIntervalMillis(), TimeUnit.MILLISECONDS);
        }
    }
}
