/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

package org.apache.cassandra.service.accord;

import java.net.InetAddress;
import java.net.UnknownHostException;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.NavigableSet;
import java.util.Objects;
import java.util.Set;
import java.util.TreeSet;
import java.util.concurrent.TimeUnit;
import java.util.stream.Collectors;

import javax.annotation.Nullable;

import com.google.common.collect.ImmutableBiMap;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Sets;
import org.junit.BeforeClass;
import org.junit.Test;

import accord.Utils;
import accord.api.Agent;
import accord.api.MessageSink;
import accord.api.TopologyListener;
import accord.api.TopologySorter;
import accord.impl.TestAgent;
import accord.impl.basic.Pending;
import accord.impl.basic.PendingQueue;
import accord.impl.basic.MonitoredPendingQueue;
import accord.impl.basic.RandomDelayQueue;
import accord.impl.basic.SimulatedDelayedExecutorService;
import accord.impl.mock.MockCluster;
import accord.local.Node;
import accord.primitives.Range;
import accord.primitives.Ranges;
import accord.topology.Topology;
import accord.topology.TopologyManager;
import accord.utils.AccordGens;
import accord.utils.Gen;
import accord.utils.Gens;
import accord.utils.RandomSource;
import accord.utils.SortedArrays.SortedArrayList;
import accord.utils.SortedListSet;
import org.apache.cassandra.concurrent.AdaptingScheduledExecutorPlus;
import org.apache.cassandra.concurrent.ScheduledExecutorPlus;
import org.apache.cassandra.config.DatabaseDescriptor;
import org.apache.cassandra.exceptions.RequestFailure;
import org.apache.cassandra.gms.EndpointState;
import org.apache.cassandra.gms.Gossiper;
import org.apache.cassandra.gms.HeartBeatState;
import org.apache.cassandra.locator.InetAddressAndPort;
import org.apache.cassandra.net.ConnectionType;
import org.apache.cassandra.net.Message;
import org.apache.cassandra.net.MessageDelivery;
import org.apache.cassandra.net.RequestCallback;
import org.apache.cassandra.tcm.ValidatingClusterMetadataService;
import org.apache.cassandra.tcm.serialization.Version;
import org.apache.cassandra.utils.AccordGenerators;
import org.apache.cassandra.utils.ByteBufferUtil;
import org.apache.cassandra.utils.concurrent.Future;
import org.assertj.core.api.Assertions;

import static accord.utils.Property.qt;
import static org.apache.cassandra.simulator.RandomSource.Choices.choose;
import static org.apache.cassandra.utils.AccordGenerators.partitioner;

public class AccordSyncPropagatorTest
{
    /**
     * One-time test bootstrap: initializes the daemon-mode configuration and registers a
     * validating cluster metadata service at the minimum Accord serialization version so
     * topology/metadata machinery used by the test is available.
     */
    @BeforeClass
    public static void setup() throws NoSuchFieldException, IllegalAccessException
    {
        DatabaseDescriptor.daemonInitialization();
        ValidatingClusterMetadataService.createAndRegister(Version.MIN_ACCORD_VERSION);
    }

    /**
     * Randomized "burn" test of {@code AccordSyncPropagator}: builds a simulated cluster of
     * 1-40 nodes, drives several epochs of ready/closed/retired notifications through a
     * simulated delay queue — with message timeouts/errors injected by {@code Sink} and
     * unhealthy-node windows injected by {@code FailureWrapper} — then drains the queue and
     * asserts every instance eventually observed every epoch as completed, synced, closed
     * and retired.
     */
    @Test
    public void burnTest()
    {
        // Generators: partitioner-specific non-empty Ranges, and lists of unique node ids.
        Gen<Gen<Ranges>> rangesGenGen = partitioner().map(p -> AccordGenerators.ranges(p).filter(r -> !r.isEmpty()));
        Gen<List<Node.Id>> nodesGen = Gens.lists(AccordGens.nodes()).unique().ofSizeBetween(1, 40);
        qt().withExamples(10).check(rs -> {
            // when gossip and cluster metadata don't know an endpoint, retries are avoided (node removed)
            // so when instances are created here they are added to gossip to trick the membership check...
            Gossiper.instance.clearUnsafe();

            SortedArrayList<Node.Id> nodes = SortedArrayList.copyUnsorted(nodesGen.next(rs), Node.Id[]::new);
            SortedListSet<Node.Id> nodesAsSet = SortedListSet.allOf(nodes);

            // Deterministic simulated scheduler; any task failure is recorded in `failures`.
            List<Throwable> failures = new ArrayList<>();
            RandomDelayQueue delayQueue = new RandomDelayQueue.Factory(rs).get();
            PendingQueue queue = new MonitoredPendingQueue(failures, delayQueue);
            Agent agent = new TestAgent.RethrowAgent();
            SimulatedDelayedExecutorService globalExecutor = new SimulatedDelayedExecutorService(queue, agent, null);
            ScheduledExecutorPlus scheduler = new AdaptingScheduledExecutorPlus(globalExecutor);

            // Epochs start at a random offset so nothing accidentally relies on epoch 1.
            final long epochOffset = rs.nextLong(1, 1024);
            Map<Long, Ranges> allRanges = new HashMap<>();
            Cluster cluster = new Cluster(nodes, epochOffset, allRanges, rs, scheduler);

            Gen<Ranges> rangesGen = rangesGenGen.next(rs);
            int numEpochs = rs.nextInt(1, 10);
            Pending.Global.setNoActiveOrigin();
            Set<Object> prefixes = new HashSet<>();
            for (int i = 0; i < numEpochs; i++)
            {
                long epoch = epochOffset + i;
                Ranges ranges;
                {
                    // Re-draw until all range prefixes are unused by earlier epochs,
                    // keeping each epoch's ranges disjoint across the run.
                    Ranges tmp = rangesGen.next(rs);
                    while (tmp.stream().anyMatch(r -> prefixes.contains(r.prefix())))
                        tmp = rangesGen.next(rs);
                    ranges = tmp;
                }
                ranges.stream().forEach(r -> prefixes.add(r.prefix()));
                allRanges.put(epoch, ranges.mergeTouching());
                scheduler.schedule(() -> {
                    // Every node announces it is ready to coordinate this epoch...
                    for (Node.Id nodeId : nodes)
                    {
                        cluster.node(nodeId).topology.onReadyToCoordinate(nodeId, epoch);
                        cluster.node(nodeId).propagator.onReadyToCoordinate(epoch, nodes);
                    }

                    // ...then randomly chosen nodes (possibly redundantly, 1-3 times)
                    // close each range and, a simulated minute later, retire it.
                    for (int j = 0, attempts = rs.nextInt(1, 4); j < attempts; j++)
                    {
                        for (Range range : ranges)
                        {
                            Cluster.Instance inst = cluster.node(choose(rs, nodesAsSet));
                            scheduler.schedule(() -> {
                                Ranges subrange = Ranges.of(range);
                                inst.topology.onEpochClosed(subrange, epoch);
                                inst.propagator.onEpochClosed(subrange, epoch, nodes);
                                scheduler.schedule(() -> {
                                    inst.topology.onEpochRetired(subrange, epoch);
                                    inst.propagator.onEpochRetired(subrange, epoch, nodes);
                                }, 1, TimeUnit.MINUTES);
                            }, rs.nextInt(30, 300), TimeUnit.SECONDS);
                        }
                    }
                }, rs.nextInt(30, 300), TimeUnit.SECONDS);
            }
            Pending.Global.clearActiveOrigin();

            // Drain the simulated event queue to completion, failing fast on any recorded error.
            while (queue.size() > 0)
            {
                Runnable next = (Runnable) queue.poll();
                if (next == null)
                    break;
                Pending.Global.setActiveOrigin((Pending)next);
                next.run();
                Pending.Global.clearActiveOrigin();
                if (!failures.isEmpty())
                {
                    RuntimeException e = new RuntimeException("Failures detected");
                    failures.forEach(e::addSuppressed);
                    throw e;
                }
            }
            // Queue drained but syncs still outstanding means the propagator is stuck.
            if (hasPending(cluster))
                throw new AssertionError("Unable to make progress: pending syncs on \n" + cluster.instances.values().stream().filter(i -> i.propagator.hasPending()).map(i -> i.propagator.toString()).collect(Collectors.joining("\n")));

            // Every instance must have seen every epoch fully acked, synced, closed and retired.
            for (Cluster.Instance inst : cluster.instances.values())
            {
                Cluster.Listener cs = inst.listener;
                assertSetsEqual(cs.completedEpochs, allRanges.keySet(), "completedEpochs %s", inst.id);
                assertSetsEqual(cs.syncCompletes.keySet(), allRanges.keySet(), "syncCompletes %s", inst.id);
                for (Map.Entry<Long, Set<Node.Id>> e : cs.syncCompletes.entrySet())
                    assertSetsEqual(e.getValue(), nodesAsSet, "syncCompletes values on %s", inst.id);

                // Normalize before comparing: listeners may have accumulated ranges piecewise.
                for (Map.Entry<?, Ranges> e : cs.closed.entrySet())
                    e.setValue(e.getValue().mergeTouching());
                for (Map.Entry<?, Ranges> e : cs.redundant.entrySet())
                    e.setValue(e.getValue().mergeTouching());
                assertMapEquals(cs.closed, allRanges, "Unexpected state for closed on %s", inst.id);
                assertMapEquals(cs.redundant, allRanges, "Unexpected state for redundant on %s", inst.id);
            }
        });
    }

    /**
     * Asserts that {@code actual} and {@code expected} contain exactly the same elements,
     * reporting unexpected elements and missing elements as two separate assertions so
     * failures identify which direction differs.
     */
    private static <T> void assertSetsEqual(Set<T> actual, Set<T> expected, String msg, Object... args)
    {
        Assertions.assertThat(Sets.difference(actual, expected))
                  .describedAs("Unexpected values detected; " + msg, args)
                  .isEmpty();
        Assertions.assertThat(Sets.difference(expected, actual))
                  .describedAs("Missing values detected; " + msg, args)
                  .isEmpty();
    }

    /**
     * Asserts that both maps have identical key sets and an identical value for every key,
     * collecting all per-key mismatches into a single {@link AssertionError}.
     */
    private static <K, V> void assertMapEquals(Map<K, V> actual, Map<K, V> expected, String msg, Object... args)
    {
        assertSetsEqual(actual.keySet(), expected.keySet(), msg, args);
        List<String> mismatches = new ArrayList<>();
        actual.forEach((key, value) -> {
            V expectedValue = expected.get(key);
            if (!Objects.equals(value, expectedValue))
                mismatches.add(String.format("Mismatch at key %s: expected %s but given %s", key, expectedValue, value));
        });
        if (!mismatches.isEmpty())
            throw new AssertionError(String.join("\n", mismatches));
    }

    /** @return whether any instance's propagator still has outstanding sync notifications. */
    private static boolean hasPending(Cluster cluster)
    {
        return cluster.instances.values()
                                .stream()
                                .filter(instance -> instance.propagator.hasPending())
                                .findAny()
                                .isPresent();
    }

    private static class Cluster implements AccordEndpointMapper
    {
        private final ImmutableBiMap<Node.Id, InetAddressAndPort> nodeToAddress;
        private final ImmutableMap<Node.Id, Instance> instances;
        private final RandomSource rs;
        private final ScheduledExecutorPlus scheduler;

        /**
         * Wires up one simulated instance per node id: a {@code TopologyManager}, an
         * {@code AccordSyncPropagator} (fed by a failure-injecting endpoint mapper and a
         * lossy message sink) and a recording {@code Listener}; each fake endpoint is also
         * registered with Gossiper so membership checks treat the node as known.
         *
         * @param nodes     node ids participating in the simulation
         * @param minEpoch  first epoch used by the run (epoch offset)
         * @param allRanges shared epoch -> ranges map, filled in later by the test body
         */
        private Cluster(List<Node.Id> nodes,
                        long minEpoch,
                        Map<Long, Ranges> allRanges,
                        RandomSource rs,
                        ScheduledExecutorPlus scheduler)
        {
            this.rs = rs;
            this.scheduler = scheduler;
            ImmutableBiMap.Builder<Node.Id, InetAddressAndPort> nodeToAddress = ImmutableBiMap.builder();
            ImmutableMap.Builder<Node.Id, Instance> instances = ImmutableMap.builder();
            for (Node.Id id : nodes)
            {
                InetAddressAndPort address = addressFromInt(id.id);
                nodeToAddress.put(id, address);
                Node node = Utils.createNode(id, Topology.EMPTY, new MessageSink.NoOpSink(), new MockCluster.Clock(0));
                // Sorter is a no-op; epoch fetch is unsupported because the test drives epochs directly.
                TopologyManager topology = new TopologyManager((TopologySorter.StaticSorter)(a,b,c)->0, node, ignore -> { throw new UnsupportedOperationException(); }, null, null);
                Listener cs = new Listener(id, minEpoch, nodes, allRanges);
                Sink sink = new Sink(id);
                FailureWrapper fw = new FailureWrapper(Cluster.this, id);
                AccordSyncPropagator propagator = new AccordSyncPropagator(id, fw, sink, scheduler);
                topology.addListener(propagator);
                topology.addListener(cs);
                propagator.setTestListener(cs);
                instances.put(id, new Instance(id, topology, cs, sink, propagator));
                // See burnTest: gossip must know the endpoint or retries are skipped entirely.
                Gossiper.instance.endpointStateMap.put(address, new EndpointState(HeartBeatState.empty()));
            }
            this.nodeToAddress = nodeToAddress.build();
            this.instances = instances.build();
        }

        /**
         * Builds a deterministic fake endpoint (port 1) from an integer node id by using
         * the id's big-endian byte representation as the raw IPv4 address.
         */
        private InetAddressAndPort addressFromInt(int value)
        {
            try
            {
                byte[] rawAddress = ByteBufferUtil.bytes(value).array();
                InetAddress inet = InetAddress.getByAddress(rawAddress);
                return InetAddressAndPort.getByAddressOverrideDefaults(inet, 1);
            }
            catch (UnknownHostException e)
            {
                // getByAddress only rejects malformed lengths; a 4-byte int can't fail.
                throw new AssertionError(e);
            }
        }

        /**
         * Looks up the simulated instance for the given node id.
         *
         * @throws NullPointerException if the id is not part of this cluster
         */
        public Instance node(Node.Id id)
        {
            Instance found = instances.get(id);
            if (found != null)
                return found;
            throw new NullPointerException("Unknown id: " + id);
        }

        /** Resolves the endpoint to its node id, then looks up that instance. */
        public Instance node(InetAddressAndPort address)
        {
            return node(mappedIdOrNull(address));
        }

        /** Reverse lookup: endpoint -> node id, or null if this cluster has no such endpoint. */
        @Override
        public Node.Id mappedIdOrNull(InetAddressAndPort endpoint, Object ignore)
        {
            return nodeToAddress.inverse().get(endpoint);
        }

        /** Forward lookup: node id -> endpoint, or null if this cluster has no such node. */
        @Override
        public InetAddressAndPort mappedEndpointOrNull(Node.Id id, Object ignore)
        {
            return nodeToAddress.get(id);
        }

        /** No nodes are ever removed in this simulation, so this is always empty. */
        @Override
        public Map<Node.Id, Long> removedNodes()
        {
            return Map.of();
        }

        // Unsupported on the raw cluster mapper: health answers come from the per-node
        // FailureWrapper decorator instead (which overrides nodeStatus with injected noise).
        @Override
        public NodeStatus nodeStatus(Node.Id id)
        {
            throw new UnsupportedOperationException();
        }

        /** Possible fates for a simulated message: delivered, dropped (timeout), or failed with an error. */
        private enum Action
        {
            DELIVER, TIMEOUT, ERROR
        }

        /**
         * Simulated {@code MessageDelivery} that randomly delivers, times out, or errors each
         * message per destination (weights 81/10/1 — presumably matching the Action enum's
         * declaration order DELIVER/TIMEOUT/ERROR). Delivered requests are handed to the
         * destination's topology after a fixed delay, and any callback still outstanding a
         * simulated minute later is force-failed with TIMEOUT.
         */
        private class Sink implements MessageDelivery
        {
            private final Node.Id from;
            // message id -> callback awaiting a response; removed on response or forced timeout
            private final Map<Long, RequestCallback<?>> callbacks = new HashMap<>();
            // per-destination weighted generator deciding each message's fate
            private final Map<InetAddressAndPort, Gen<Action>> nodeActions = new HashMap<>();

            private Sink(Node.Id from)
            {
                this.from = from;
            }

            @Override
            public <REQ> void send(Message<REQ> message, InetAddressAndPort to)
            {
                // Fire-and-forget sends are not expected in this test; only sendWithCallback is exercised.
                throw new UnsupportedOperationException();
            }

            @Override
            public <REQ, RSP> void sendWithCallback(Message<REQ> message, InetAddressAndPort to, RequestCallback<RSP> cb)
            {
                // Decide this message's fate up front; failures are reported synchronously.
                Action action = action(to);
                switch (action)
                {
                    case ERROR:
                        cb.onFailure(to, RequestFailure.UNKNOWN);
                        return;
                    case TIMEOUT:
                        cb.onFailure(to, RequestFailure.TIMEOUT);
                        return;
                    case DELIVER:
                        break;
                    default:
                        throw new IllegalStateException("Unknown action: " + action);
                }
                callbacks.put(message.id(), cb);
                // Deliver to the destination's topology after a short delay; the cast reflects
                // the only message type the propagator sends (a sync Notification).
                scheduler.schedule(() -> AccordService.receive(this, node(to).topology, (Message<AccordSyncPropagator.Notification>) message.withFrom(mappedEndpointOrNull(from))), 500, TimeUnit.MILLISECONDS);
                scheduler.schedule(() -> {
                    // If no response arrived within a simulated minute, fail the callback.
                    RequestCallback<?> removed = callbacks.remove(message.id());
                    if (removed != null)
                        removed.onFailure(to, RequestFailure.TIMEOUT);
                }, 1, TimeUnit.MINUTES);
            }

            @Override
            public <REQ, RSP> void sendWithCallback(Message<REQ> message, InetAddressAndPort to, RequestCallback<RSP> cb, ConnectionType specifyConnection)
            {
                throw new UnsupportedOperationException();
            }

            @Override
            public <REQ, RSP> Future<Message<RSP>> sendWithResult(Message<REQ> message, InetAddressAndPort to)
            {
                throw new UnsupportedOperationException();
            }

            @Override
            public <V> void respond(V response, Message<?> message)
            {
                // Responses are subject to the same loss model; a dropped response simply
                // leaves the sender's callback to be failed by the forced-timeout task above.
                Action action = action(message.respondTo());
                switch (action)
                {
                    case ERROR:
                    case TIMEOUT:
                        return;
                    case DELIVER:
                        break;
                    default:
                        throw new IllegalStateException("Unknown action: " + action);
                }

                RequestCallback cb = node(message.respondTo()).messagingService.callbacks.remove(message.id());
                if (cb != null)
                    cb.onResponse(message.responseWith(response));
            }

            // Lazily creates a weighted Action generator per destination and draws from it.
            private Action action(InetAddressAndPort to)
            {
                return nodeActions.computeIfAbsent(to, ignore -> Gens.enums().allWithWeights(Action.class, 81, 10, 1)).next(rs);
            }
        }

        /**
         * {@code AccordEndpointMapper} decorator that injects health noise: the local node is
         * always HEALTHY, while other nodes are reported UNHEALTHY in short biased random
         * runs; id/endpoint lookups simply delegate to the wrapped mapper.
         */
        private class FailureWrapper implements AccordEndpointMapper
        {
            private final AccordEndpointMapper wrapped;
            private final Node.Id self;
            // per-node generator of biased boolean runs; a true draw marks the node unhealthy
            private final Map<Node.Id, Gen<Boolean>> nodeRuns = new HashMap<>();

            private FailureWrapper(AccordEndpointMapper wrapped, Node.Id self)
            {
                this.wrapped = wrapped;
                this.self = self;
            }

            @Nullable
            @Override
            public Node.Id mappedIdOrNull(InetAddressAndPort endpoint, @Nullable Object logIdentityIfUnmapped)
            {
                return wrapped.mappedIdOrNull(endpoint, logIdentityIfUnmapped);
            }

            @Nullable
            @Override
            public InetAddressAndPort mappedEndpointOrNull(Node.Id id, @Nullable Object logIdentityIfUnmapped)
            {
                return wrapped.mappedEndpointOrNull(id, logIdentityIfUnmapped);
            }

            @Override
            public NodeStatus nodeStatus(Node.Id id)
            {
                if (self.equals(id)) return NodeStatus.HEALTHY;

                // Biased (.01) repeating runs of 3-15 draws; false => HEALTHY, true => UNHEALTHY.
                // NOTE(review): polarity depends on Gens.bools().biasedRepeatingRuns semantics — confirm.
                return !nodeRuns.computeIfAbsent(id, ignore -> Gens.bools().biasedRepeatingRuns(.01, rs.nextInt(3, 15))).next(rs) ? NodeStatus.HEALTHY : NodeStatus.UNHEALTHY;
            }

            @Override
            public Map<Node.Id, Long> removedNodes()
            {
                // No nodes are removed in this simulation.
                return Map.of();
            }
        }

        /**
         * Recording listener used for assertions: tracks, per epoch, which peers reported
         * sync completion, which peers acked notifications, which ranges were reported
         * closed/retired, and the set of fully-acked ("completed") epochs.
         */
        private static class Listener implements AccordSyncPropagator.TestListener, TopologyListener
        {
            private final Node.Id self;
            // first epoch of the run; merge() never propagates below this bound
            private final long minEpoch;
            private final List<Node.Id> nodes;
            // epoch -> peers that reported ready-to-coordinate (sync complete) for it
            private final Map<Long, Set<Node.Id>> syncCompletes = new HashMap<>();
            // epoch -> peers that acked a notification for it
            private final Map<Long, Set<Node.Id>> endpointAcks = new HashMap<>();
            // epochs acked by every node; synchronized since acks may interleave
            private final NavigableSet<Long> completedEpochs = Collections.synchronizedNavigableSet(new TreeSet<>());
            private final Map<Long, Ranges> allRanges;
            private final Map<Long, Ranges> closed = new HashMap<>();
            private final Map<Long, Ranges> redundant = new HashMap<>();

            private Listener(Node.Id node, long minEpoch, List<Node.Id> nodes, Map<Long, Ranges> allRanges)
            {
                this.self = node;
                this.minEpoch = minEpoch;
                this.nodes = nodes;
                this.allRanges = allRanges;
            }

            @Override
            public void onRemoteReadyToCoordinate(Node.Id node, long epoch)
            {
                syncCompletes.computeIfAbsent(epoch, ignore -> new HashSet<>()).add(node);
            }

            @Override
            public void onEpochClosed(Ranges ranges, long epoch, @Nullable Topology topology)
            {
                merge(closed, ranges, epoch);
            }

            @Override
            public void onEpochRetired(Ranges ranges, long epoch, @Nullable Topology topology)
            {
                merge(redundant, ranges, epoch);
            }

            // Records `ranges` for `epoch`, then propagates any remainder backwards to earlier
            // epochs (closing/retiring an epoch implies the same for prior epochs), stopping at
            // minEpoch or once nothing new is added. Each step subtracts ranges already recorded
            // and the epoch's full coverage from allRanges before continuing.
            // NOTE(review): assumes allRanges already contains an entry for each visited epoch
            // (true for epochs scheduled by burnTest) — a missing entry would NPE in without().
            private void merge(Map<Long, Ranges> map, Ranges ranges, long epoch)
            {
                while (epoch >= minEpoch)
                {
                    Ranges cur = map.get(epoch);
                    Ranges upd = cur == null ? ranges : cur.with(ranges);
                    if (upd == cur)
                        break;
                    if (cur != null)
                        ranges = ranges.without(cur);
                    ranges = ranges.without(allRanges.get(epoch));
                    map.put(epoch, upd);
                    --epoch;
                }
            }

            @Override
            public void onEndpointAck(Node.Id id, long epoch)
            {
                // Once every node has acked the epoch, mark it complete.
                Set<Node.Id> acks = endpointAcks.computeIfAbsent(epoch, ignore -> new HashSet<>());
                if (acks.add(id) && acks.containsAll(nodes))
                    completedEpochs.add(epoch);
            }
        }

        /**
         * Bundles the per-node simulation components: the node's topology manager, its
         * recording listener, its lossy message sink, and the propagator under test.
         */
        public class Instance
        {
            private final Node.Id id;
            private final TopologyManager topology;
            private final Listener listener;
            private final Sink messagingService;
            private final AccordSyncPropagator propagator;

            private Instance(Node.Id id,
                             TopologyManager topology,
                             Listener listener,
                             Sink messagingService,
                             AccordSyncPropagator propagator)
            {
                this.id = id;
                this.topology = topology;
                this.listener = listener;
                this.messagingService = messagingService;
                this.propagator = propagator;
            }
        }
    }
}
