/*
 * Copyright 2021 TiKV Project Authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 */

package com.webank.tikv.region;

import com.google.common.util.concurrent.ListenableFuture;
import com.google.protobuf.ByteString;
import com.webank.tikv.AbstractGRPCClient;
import com.webank.tikv.TiConfiguration;
import com.webank.tikv.exception.GrpcException;
import com.webank.tikv.kvproto.Kvrpcpb;
import com.webank.tikv.kvproto.Metapb;
import com.webank.tikv.kvproto.TikvGrpc;
import com.webank.tikv.util.BackOffer;
import com.webank.tikv.util.ChannelFactory;
import io.grpc.ManagedChannel;
import org.apache.logging.log4j.LogManager;
import org.apache.logging.log4j.Logger;

import java.util.LinkedList;
import java.util.List;
import java.util.concurrent.TimeUnit;

import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;

/**
 * Base class for clients that send gRPC requests to a single TiKV region/store pair.
 * Holds the cached {@link TiRegion} and {@link TiStore} this client currently targets and
 * implements {@link RegionErrorReceiver} so that region errors (leader changes, unreachable
 * stores) can re-route subsequent requests to the new leader.
 *
 * <p>Not thread-safe: {@link #region} and {@link #store} are mutated without synchronization
 * when the leader moves.
 */
public abstract class AbstractRegionStoreClient
    extends AbstractGRPCClient<TikvGrpc.TikvBlockingStub, TikvGrpc.TikvFutureStub>
    implements RegionErrorReceiver {

    private static final Logger LOGGER = LogManager.getLogger(AbstractRegionStoreClient.class);

    /** Interval (ms) between polls of the in-flight leader-probe futures. */
    private static final long LEADER_PROBE_POLL_INTERVAL_MS = 2L;

    protected final RegionManager regionManager;
    /** Region this client currently targets; replaced when the leader moves. */
    protected TiRegion region;
    /** Store hosting the current leader peer of {@link #region}. */
    protected TiStore store;

    /**
     * @param conf           client configuration
     * @param region         region this client will send requests to; must have a leader peer
     * @param store          store hosting the region's leader peer
     * @param channelFactory factory used to (re)create channels when the leader store changes
     * @param blockingStub   initial blocking stub bound to {@code store}
     * @param asyncStub      initial future stub bound to {@code store}
     * @param regionManager  used to look up stores and invalidate/refresh cached regions
     */
    protected AbstractRegionStoreClient(TiConfiguration conf, TiRegion region, TiStore store,
                                        ChannelFactory channelFactory,
                                        TikvGrpc.TikvBlockingStub blockingStub, TikvGrpc.TikvFutureStub asyncStub,
                                        RegionManager regionManager) {
        super(conf, channelFactory, blockingStub, asyncStub);

        checkNotNull(region, "Region is empty");
        // The previous extra checkArgument(region.getLeader() != null, ...) duplicated this
        // check and has been removed.
        checkNotNull(region.getLeader(), "Leader Peer is null");

        this.region = region;
        this.store = store;
        this.regionManager = regionManager;
    }

    @Override
    public TiRegion getRegion() {
        return region;
    }

    @Override
    public TiStore getStore() {
        return store;
    }

    @Override
    protected TikvGrpc.TikvBlockingStub getBlockingStub() {
        return blockingStub.withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
    }

    @Override
    protected TikvGrpc.TikvFutureStub getAsyncStub() {
        return asyncStub.withDeadlineAfter(getTimeout(), TimeUnit.MILLISECONDS);
    }

    /**
     * Handles a NotLeader region error by pointing this client at the new leader's store.
     *
     * @param newRegion region info carried by the NotLeader error
     * @param backOffer back-off budget for the store lookup
     * @return {@code true}, signalling the caller may retry the request
     */
    @Override
    public boolean onNotLeader(TiRegion newRegion, BackOffer backOffer) {
        // A differing region epoch means more than just the leader store changed (e.g. a
        // split/merge happened); keep the old state and let the caller refresh the cache.
        if (!region.getRegionEpoch().equals(newRegion.getRegionEpoch())) {
            return true;
        }

        region = newRegion;
        store = regionManager.getStoreById(newRegion.getLeader().getStoreId(), backOffer);
        updateClientStub();
        return true;
    }

    /**
     * Handles an unreachable store: refreshes invalid store info, or probes followers for a
     * new leader to switch to.
     *
     * @return {@code true} if the client was re-pointed at a usable store and the request may
     *         be retried
     */
    @Override
    public boolean onStoreUnreachable(BackOffer backOffer) {
        if (!store.isValid()) {
            LOGGER.warn("store:{} has been invalid", store.getId());
            store = regionManager.getStoreById(store.getId(), backOffer);
            updateClientStub();
            return true;
        }

        // seek an available leader store to send request
        backOffer.checkTimeout();
        try {
            return seekLeaderStore(backOffer);
        } catch (RuntimeException e) {
            // Log the throwable itself (not just getMessage()) so the stack trace is kept.
            LOGGER.warn("seek leader store failed", e);
        }
        return false;
    }

    /** Rebinds both stubs to a channel for the current {@link #store}'s address. */
    private void updateClientStub() {
        ManagedChannel channel = channelFactory.getChannel(store.getStore().getAddress());
        blockingStub = TikvGrpc.newBlockingStub(channel).withDeadlineAfter(timeout, TimeUnit.MILLISECONDS);
        asyncStub = TikvGrpc.newFutureStub(channel).withDeadlineAfter(timeout, TimeUnit.MILLISECONDS);
    }

    /**
     * Tries to locate and switch to the region's new leader among its followers.
     *
     * @return {@code true} if a reachable leader was found and this client now targets it
     */
    private boolean seekLeaderStore(BackOffer backOffer) {
        List<Metapb.Peer> peers = region.getFollowers();
        if (peers.isEmpty()) {
            // no followers available, retry
            LOGGER.warn("no followers of region:{} available", region.getId());
            regionManager.invalidRegion(region);
            return false;
        }

        LOGGER.warn("try switch leader: region:{}", region.getId());
        Metapb.Peer peer = switchLeaderStore(backOffer);
        if (peer != null) {
            TiStore currentLeaderStore = regionManager.getStoreById(peer.getStoreId(), backOffer);
            if (currentLeaderStore.isReachable()) {
                LOGGER.info("update leader using switchLeader logic from store:{} to store:{}", region.getLeader().getStoreId(), peer.getStoreId());
                TiRegion result = regionManager.updateLeader(region, peer.getStoreId());
                if (result != null) {
                    region = result;
                    // switch to leader store
                    store = currentLeaderStore;
                    updateClientStub();
                    return true;
                }
                return false;
            }
        }
        // no leader found, some response does not return normally, there may be network partition.
        LOGGER.warn("leader for region:{} is not found, it is possible that network partition occurred", region.getId());
        return false;
    }

    /**
     * Probes every follower peer with a lightweight request and polls the responses to
     * discover which peer has become the leader.
     *
     * @return the peer that appears to be the new leader, or {@code null} if no probe
     *         identified one
     * @throws GrpcException if the polling thread is interrupted (interrupt flag restored)
     */
    private Metapb.Peer switchLeaderStore(BackOffer backOffer) {
        List<SwitchLeaderTask> responses = new LinkedList<>();
        for (Metapb.Peer peer : region.getFollowers()) {
            ByteString key = region.getStartKey();
            try {
                TiStore peerStore = regionManager.getStoreById(peer.getStoreId(), backOffer);
                ManagedChannel channel = channelFactory.getChannel(peerStore.getAddress());
                TikvGrpc.TikvFutureStub stub = TikvGrpc.newFutureStub(channel)
                    .withDeadlineAfter(timeout, TimeUnit.MILLISECONDS);
                Kvrpcpb.RedisRequest request = Kvrpcpb.RedisRequest.newBuilder()
                    .setContext(region.makeLeaderContext())
                    .setCmd("GET")
                    .setKey(key)
                    .build();
                ListenableFuture<Kvrpcpb.RedisResponse> task = stub.redisExec(request);
                responses.add(new SwitchLeaderTask(task, peer));
            } catch (Exception e) {
                // Two placeholders + trailing throwable so log4j2 records the stack trace
                // (a third "{}" would swallow the exception into the message instead).
                LOGGER.warn("switch region:{} leader store to:{} failed",
                    region.getId(), peer.getStoreId(), e);
            }
        }

        // Poll the in-flight probes until one identifies the leader or all complete.
        while (true) {
            try {
                Thread.sleep(LEADER_PROBE_POLL_INTERVAL_MS);
            } catch (InterruptedException e) {
                // Restore the interrupt flag before converting to an unchecked exception.
                Thread.currentThread().interrupt();
                throw new GrpcException(e);
            }
            List<SwitchLeaderTask> unfinished = new LinkedList<>();
            for (SwitchLeaderTask task : responses) {
                if (!task.task.isDone()) {
                    unfinished.add(task);
                    continue;
                }
                try {
                    Kvrpcpb.RedisResponse resp = task.task.get();
                    if (resp != null && resp.hasRegionError()) {
                        // A region error on a request built with makeLeaderContext is taken
                        // to mean this peer is the leader — NOTE(review): confirm semantics.
                        LOGGER.warn("response indicates peer:{} is leader", task.peer.getId());
                        return task.peer;
                    }
                } catch (Exception e) {
                    // A failed probe just means this peer is unusable; keep checking others,
                    // but record the failure instead of swallowing it silently.
                    LOGGER.debug("leader probe for peer:{} failed", task.peer.getId(), e);
                }
            }
            if (unfinished.isEmpty()) {
                return null;
            }
            responses = unfinished;
        }
    }

    /** Pairs an in-flight leader-probe future with the follower peer it was sent to. */
    private static class SwitchLeaderTask {
        private final ListenableFuture<Kvrpcpb.RedisResponse> task;
        private final Metapb.Peer peer;

        private SwitchLeaderTask(ListenableFuture<Kvrpcpb.RedisResponse> task, Metapb.Peer peer) {
            this.task = task;
            this.peer = peer;
        }
    }
}
