package xin.marcher.wind.consistency.election;

import cn.hutool.core.collection.CollectionUtil;
import cn.hutool.core.util.ObjectUtil;
import cn.hutool.crypto.SecureUtil;
import cn.hutool.json.JSONUtil;
import com.google.common.eventbus.EventBus;
import com.google.common.eventbus.Subscribe;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.DisposableBean;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.web.context.WebServerInitializedEvent;
import org.springframework.context.ApplicationListener;
import org.springframework.core.ParameterizedTypeReference;
import org.springframework.http.HttpEntity;
import org.springframework.http.HttpMethod;
import org.springframework.http.HttpStatus;
import org.springframework.http.ResponseEntity;
import org.springframework.stereotype.Component;
import org.springframework.util.StringUtils;
import org.springframework.web.client.ResourceAccessException;
import xin.marcher.wind.consistency.common.CommonRes;
import xin.marcher.wind.consistency.config.PeerNodeConfigProperties;
import xin.marcher.wind.consistency.config.TendConsistencyConfiguration;
import xin.marcher.wind.consistency.enums.PeerOpTypeEnum;
import xin.marcher.wind.consistency.enums.PeerTransportEnum;
import xin.marcher.wind.consistency.manager.TaskScheduleManager;
import xin.marcher.wind.consistency.remote.message.*;
import xin.marcher.wind.consistency.scheduler.SchedulerManager;
import xin.marcher.wind.consistency.sharding.ConsistencyTaskShardingContext;
import xin.marcher.wind.consistency.sharding.ConsistencyTaskShardingHandler;
import xin.marcher.wind.consistency.util.NetUtils;
import xin.marcher.wind.consistency.util.RestTemplateUtils;

import javax.annotation.Resource;
import java.util.*;
import java.util.concurrent.CompletionService;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.locks.Lock;
import java.util.concurrent.locks.ReadWriteLock;
import java.util.concurrent.locks.ReentrantReadWriteLock;
import java.util.stream.Collectors;

/**
 * 集群节点选举及任务分片锁使用的处理器
 *
 * @author marcher
 **/
@Slf4j
@Component
public class PeerElectionHandler implements ApplicationListener<WebServerInitializedEvent>, DisposableBean {

    /**
     * Port this node's web server listens on.
     * NOTE(review): the original comment called this "the current node's ip address",
     * but the field clearly holds the HTTP port injected from {@code server.port}.
     */
    @Value("${server.port}")
    private int currentServerPort;

    /**
     * Cluster node configuration (source of the raw "ip:port:peerId,..." peers string).
     */
    @Resource
    private PeerNodeConfigProperties peerNodeConfigProperties;

    /**
     * Consistency task schedule manager (drives the actual task execution).
     */
    @Resource
    private TaskScheduleManager taskScheduleManager;

    /**
     * Used at node startup to ask the other nodes whether a leader already exists:
     * if one does, this node simply registers with it; otherwise the normal
     * election logic runs.
     */
    @Resource
    private CompletionService<RegisterOrCancelResponse> peerRegisterPool;

    /**
     * Consistency task sharding handler.
     */
    @Autowired
    private ConsistencyTaskShardingHandler consistencyTaskShardingHandler;
    /**
     * Consistency framework configuration.
     *
     */
    @Resource
    private TendConsistencyConfiguration tendConsistencyConfiguration;

    /**
     * Scheduler manager.
     */
    @Resource
    private SchedulerManager schedulerManager;

    /**
     * Latest heartbeat response per follower. Format: key: peerId, value: HeartbeatResponse.
     *
     * Each time the leader receives a follower's reply to a heartbeat, the entry for
     * that follower's peerId is overwritten, so the leader can always inspect every
     * follower's most recent heartbeat acknowledgement.
     */
    private Map<String, LeaderToFollowerHeartbeatResponse> heartbeatResponseTable;

    /**
     * Parsed cluster node configuration entries ("ip:port:peerId").
     */
    private List<String> peersConfigList;

    /**
     * Instances available for executing sharded tasks. Format: key: peerId, value: ip:port.
     */
    private final Map<String, String> availableShardingInstances = new HashMap<>();

    /**
     * Most recent heartbeat response received from the leader (follower side).
     */
    private FollowerToLeaderHeartbeatResponse recentlyFollowerToLeaderHeartbeatResponse;

    /**
     * Task sharding context shared by the election, heartbeat and sharding logic.
     */
    private final ConsistencyTaskShardingContext consistencyTaskShardingContext = new ConsistencyTaskShardingContext();

    /**
     * In-memory event bus.
     * - Methods annotated with @Subscribe on this class handle the published events.
     */
    public EventBus eventBus;

    /**
     * Random generator used for the startup election sleep.
     */
    private final Random RANDOM = new Random();

    /**
     * Read-write lock guarding access to the sharding context.
     */
    private final ReadWriteLock readWriteLock = new ReentrantReadWriteLock();

    /**
     * Read lock.
     */
    private final Lock readLock = this.readWriteLock.readLock();

    /**
     * Write lock.
     */
    private final Lock writeLock = this.readWriteLock.writeLock();


    /**
     * Fires once the embedded web server is up: runs {@link #start()}, which performs
     * framework initialisation, leader election and starts the scheduled jobs.
     */
    @Override
    public void onApplicationEvent(WebServerInitializedEvent event) {
        start();
    }

    /**
     * Graceful shutdown hook: broadcasts an OFFLINE notification to the other cluster
     * nodes so they can re-shard / re-elect as needed.
     */
    @Override
    public void destroy() throws Exception {
        log.info("peerId={}的节点下线", consistencyTaskShardingContext.getCurrentPeerId());
        // Build the node-offline request and notify the other nodes in the cluster.
        checkIsExistLeaderAndNotifyAddOrCancel(PeerOpTypeEnum.OFFLINE.getOpType());
        log.info("peerId={}的节点下线完毕", consistencyTaskShardingContext.getCurrentPeerId());
    }

    /**
     * Bootstraps the node: initialisation, election + task sharding, then the task
     * execution engine.
     */
    public void start() {
        // Parse config, resolve this node's peer id, wire up the event bus.
        init();
        // Elect a leader and (when this node wins) shard the consistency tasks.
        startElectionProcessAndDoTaskSharding();
        // Start the consistency task execution engine: it periodically scans the DB and the
        // local RocksDB for tasks to run. The AOP aspect degrades to writing tasks into
        // RocksDB when the DB write fails; those locally stored tasks are processed here too.
        startTaskExecuteEngine();
    }

    /**
     * One-time initialisation: parse the cluster config, resolve this node's peer id,
     * build the peerId -> "ip:port" lookup map, and register this handler on a fresh
     * in-memory Guava EventBus.
     *
     * Order matters: {@link #setCurrentPeerId(List)} must run before the event bus is
     * created, because the bus identifier is the current peer id.
     */
    private void init() {
        // Parse the configured cluster addresses ("ip:port:peerId,...") into a list.
        peersConfigList = parsePeersConfigToList(peerNodeConfigProperties.getPeersConfig());
        // Resolve and record this node's peerId from that list.
        setCurrentPeerId(peersConfigList);
        // Build the sharding instance map. Format: key: peerId, value: ip:port.
        buildAvailableShardingInstances(peersConfigList);
        // In-memory (JVM-local) event bus used to observe node online/offline and
        // sharding-finished events. Anyone can publish to it or subscribe on it;
        // this class subscribes to events via its @Subscribe-annotated methods.
        eventBus = new EventBus(consistencyTaskShardingContext.getCurrentPeerId());
        eventBus.register(this);
    }

    /**
     * Runs the election and, if this node wins, performs the initial task sharding.
     *
     * A random sleep plus the write lock keeps the cluster from electing multiple
     * leaders at once: while node A is in the middle of becoming leader but has not
     * finished yet, a concurrent call from node B to A's /common/registerOrCancel
     * endpoint would otherwise be told "A is not the leader", which could produce
     * more than one leader. Holding the write lock blocks that request until the
     * election has settled.
     */
    private void startElectionProcessAndDoTaskSharding() {
        // Random sleep so that simultaneously started nodes do not all become leader
        // at the same time (avoids a split-vote-like situation).
        randomSleep();

        writeLock.lock();
        try {
            String leaderPeerId = "";

            // Ask the cluster whether a leader already exists. If one does, join it
            // directly; otherwise check which other nodes are reachable. If no node
            // other than this one can be reached, this node is the leader.
            List<RegisterOrCancelResponse> registerOrCancelResponses = checkIsExistLeaderAndNotifyAddOrCancel(PeerOpTypeEnum.ONLINE.getOpType());
            leaderPeerId = getLeaderIdByTransportCheck(registerOrCancelResponses);

            // Final fallback: if still no leader id, use the smallest peer id as leader.
            if (StringUtils.isEmpty(leaderPeerId)) {
                // Use the node with the minimum id as leader.
                leaderPeerId = getMinPeerIdAsLeader(peersConfigList);
            }

            if (StringUtils.isEmpty(leaderPeerId)) {
                log.warn("未找到leaderPeerId，退出选举流程");
                return;
            }
            // Record the elected leader in the sharding context.
            consistencyTaskShardingContext.setCurrentLeaderPeerId(leaderPeerId);

            // Determine whether the elected leader is this very node.
            boolean electionResult = leaderIsMySelf(leaderPeerId, availableShardingInstances);
            log.info("当前节点成为 [{}] 节点", electionResult ? "leader" : "follower");

            // If the context holds no sharding result yet and this node is the leader...
            if (ObjectUtil.isEmpty(consistencyTaskShardingContext.getTaskSharingResult()) && electionResult) {
                // ...perform the task sharding now.
                consistencyTaskShardingHandler.doTaskSharding(peersConfigList);
            }

            // Start all schedulers appropriate for this node's role.
            startCurrentPeerAllScheduler(electionResult);
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Derives the leader id from the peers' register/cancel responses.
     *
     * If the unreachable responses are non-empty and account for every node except this
     * one, the cluster effectively contains only this node, so this node is the leader.
     * Otherwise, if some peer replied claiming leadership, that peer's id is returned.
     *
     * @param registerOrCancelResponses responses to the online/offline notification
     * @return the leader's peer id, or an empty string when none could be determined
     */
    private String getLeaderIdByTransportCheck(List<RegisterOrCancelResponse> registerOrCancelResponses) {
        if (CollectionUtil.isEmpty(registerOrCancelResponses)) {
            return "";
        }

        // Count the peers that could not be reached at all.
        long unreachableCount = registerOrCancelResponses.stream()
                .filter(reply -> !reply.isAccessSuccess())
                .count();

        // Every node except this one failed to communicate: this node is the leader.
        if (unreachableCount > 0 && unreachableCount == peersConfigList.size() - 1) {
            return consistencyTaskShardingContext.getCurrentPeerId();
        }

        // Otherwise look for a reply coming from the existing leader.
        Optional<RegisterOrCancelResponse> leaderReply = registerOrCancelResponses.stream()
                .filter(RegisterOrCancelResponse::isLeader)
                .findFirst();
        return leaderReply.map(RegisterOrCancelResponse::getReplyPeerId).orElse("");
    }

    /**
     * Asks every other cluster node whether a leader exists and notifies them that this
     * node is coming online / going offline.
     *
     * Bug fix: the number of futures consumed from {@code peerRegisterPool} now equals
     * the number of requests actually submitted. The original always took
     * {@code peersConfigList.size() - 1} results, which blocks forever when a blank
     * entry is skipped, and leaves stale futures in the pool when this node's own
     * entry is not present in the list.
     *
     * @param peerOpType online/offline operation type, see {@link PeerOpTypeEnum}
     * @return responses from the peers that were contacted (possibly partial on error)
     */
    private List<RegisterOrCancelResponse> checkIsExistLeaderAndNotifyAddOrCancel(Integer peerOpType) {
        // This node's unique identity in the cluster: ip:port:peerId.
        String currentPeerIdentify = NetUtils.getCurrentPeerAddress() + ":" + currentServerPort + ":" + consistencyTaskShardingContext.getCurrentPeerId();

        // Send the register/cancel request to every node except this one,
        // counting how many requests were actually submitted.
        int submittedCount = 0;
        for (String peer : peersConfigList) {
            if (StringUtils.isEmpty(peer) || peer.equals(currentPeerIdentify)) {
                continue;
            }

            peerRegisterPool.submit(() -> {
                try {
                    return sendRegOrCancelRequest(peer, peerOpType);
                } catch (Exception e) {
                    log.error("上下线请求发生异常 {}", e.getMessage());
                    // ResourceAccessException means the peer was unreachable; any other
                    // exception means the peer was reached but the call itself failed.
                    return RegisterOrCancelResponse
                            .builder()
                            .leader(false)
                            .accessSuccess(!(e instanceof ResourceAccessException))
                            .build();
                }
            });
            submittedCount++;
        }

        List<RegisterOrCancelResponse> result = new ArrayList<>(submittedCount);

        try {
            // Collect exactly as many results as requests were submitted.
            for (int i = 0; i < submittedCount; i++) {
                RegisterOrCancelResponse registerResponse = peerRegisterPool.take().get();
                result.add(registerResponse);
            }
            return result;
        } catch (InterruptedException e) {
            // Preserve the interrupt status for callers before bailing out.
            Thread.currentThread().interrupt();
            log.error("获取集群中leader结果时，发生异常", e);
            return result;
        } catch (Exception e) {
            log.error("获取集群中leader结果时，发生异常", e);
            return result;
        }
    }

    /**
     * Sends a node online/offline notification to one peer and returns its reply.
     *
     * @param peer       peer entry in "ip:port:peerId" form
     * @param peerOpType cluster operation type (online/offline)
     * @return the peer's response payload, or null when the call yielded no usable body
     */
    private RegisterOrCancelResponse sendRegOrCancelRequest(String peer, Integer peerOpType) {
        // Strip the trailing ":peerId" to obtain the peer's ip:port.
        String transportAddress = peer.substring(0, peer.lastIndexOf(":"));
        // Build the registration endpoint URL.
        String url = String.format(PeerTransportEnum.REGISTRY_URL_TEMPLATE.getUrl(), transportAddress);

        // leaderOffline flags whether the departing/joining node is currently the leader.
        boolean currentNodeIsLeader = consistencyTaskShardingContext.getCurrentPeerId()
                .equals(consistencyTaskShardingContext.getCurrentLeaderPeerId());
        RegisterOrCancelRequest request = RegisterOrCancelRequest.builder()
                .ip(NetUtils.getCurrentPeerAddress())
                .port(String.valueOf(currentServerPort))
                .peerId(consistencyTaskShardingContext.getCurrentPeerId())
                .opType(peerOpType)
                .leaderOffline(currentNodeIsLeader)
                .build();

        HttpEntity<RegisterOrCancelRequest> entity = new HttpEntity<>(request, null);
        ParameterizedTypeReference<CommonRes<RegisterOrCancelResponse>> responseType =
                new ParameterizedTypeReference<CommonRes<RegisterOrCancelResponse>>() {
                };
        log.info("发送上线或下线请求的url为 {}", url);

        // Fire the request and unwrap the payload, bailing out early on any
        // non-OK status or missing body.
        ResponseEntity<CommonRes<RegisterOrCancelResponse>> exchange =
                new RestTemplateUtils().exchange(url, HttpMethod.POST, entity, responseType);
        if (HttpStatus.OK.value() != exchange.getStatusCode().value() || !exchange.hasBody()) {
            return null;
        }
        CommonRes<RegisterOrCancelResponse> body = exchange.getBody();
        return body == null ? null : body.getData();
    }

    /**
     * Registers the consistency-task execution engine with the scheduler manager; the
     * scheduler invokes {@link #doStartTaskExecuteEngine()} on its schedule.
     */
    private void startTaskExecuteEngine() {
        // Start the execution engine on its dedicated scheduler.
        schedulerManager.createConsistencyTaskScheduler(this::doStartTaskExecuteEngine);
    }

    /**
     * Cancels every scheduler bound to this node and restarts the set matching its role.
     *
     * Fix: {@code heartbeatResponseTable} is now a {@link ConcurrentHashMap}. It is
     * written from the leader-heartbeat scheduler thread (see doSendHeartbeatTask),
     * iterated from the follower-alive-check scheduler thread (doFollowerAliveCheck)
     * and modified from event handlers (onPeerGroupChanged); a plain HashMap is unsafe
     * under that concurrent access.
     *
     * @param electionResult true when this node is the leader
     */
    private void startCurrentPeerAllScheduler(boolean electionResult) {
        // Cancel all scheduled jobs belonging to this node before re-creating them.
        schedulerManager.cancelAllScheduler();

        if (electionResult) {
            // Leader role: track the latest heartbeat response of each follower.
            heartbeatResponseTable = new ConcurrentHashMap<>();
            // Periodic leader heartbeat to every follower; the heartbeat also carries
            // the current task-sharding result.
            schedulerManager
                    .createLeaderToFollowerHeartbeatScheduler(
                            () -> sendHeartbeatTask(availableShardingInstances,
                                    consistencyTaskShardingContext.getCurrentLeaderPeerId())
                    );
            // Periodic check for followers that stopped responding.
            schedulerManager.createFollowerAliveCheckScheduler(this::doFollowerAliveCheck);
        } else {
            // Follower role: seed the "last leader heartbeat" marker with the current
            // time so the leader-alive check starts from a sane baseline.
            recentlyFollowerToLeaderHeartbeatResponse = FollowerToLeaderHeartbeatResponse
                    .builder()
                    .success(true)
                    .replyTimestamp(System.currentTimeMillis())
                    .build();
            // Periodic follower heartbeat towards the leader.
            schedulerManager.createFollowerHeartbeatScheduler(this::sendFollowerHeartbeatRequest);
            // Periodic check for a dead leader.
            schedulerManager.createLeaderAliveScheduler(this::doLeaderAliveCheck);
        }
    }

    /**
     * Resolves this node's peerId from the cluster configuration list and stores it in
     * the sharding context.
     *
     * Fixes over the original:
     * - {@code contains()} could false-match when one port is a prefix of another
     *   (e.g. "ip:80" matched inside "ip:8080:3"); an exact {@code startsWith} on
     *   "ip:port:" is used instead;
     * - {@code replaceAll} interpreted the address (with regex-significant dots) as a
     *   regular expression; plain {@code substring} avoids that entirely;
     * - the loop now stops at the first match.
     *
     * @param peersConfigList cluster configuration entries ("ip:port:peerId")
     */
    private void setCurrentPeerId(List<String> peersConfigList) {
        // This node's "ip:port" followed by the id separator.
        String currentPeerPrefix = NetUtils.getCurrentPeerAddress() + ":" + currentServerPort + ":";
        for (String peerAddress : peersConfigList) {
            if (peerAddress.startsWith(currentPeerPrefix)) {
                consistencyTaskShardingContext.setCurrentPeerId(peerAddress.substring(currentPeerPrefix.length()));
                return;
            }
        }
    }

    /**
     * Picks the numerically smallest peer id from the cluster list as the fallback
     * leader.
     *
     * @param peersConfigList peer entries in "ip:port:peerId" form
     * @return the smallest peer id, or an empty string when the list is empty
     */
    private String getMinPeerIdAsLeader(List<String> peersConfigList) {
        // Extract the id segment of each entry and take the numeric minimum;
        // ties keep the first occurrence, matching the original loop.
        return peersConfigList.stream()
                .map(peerInfo -> peerInfo.split(":")[2])
                .min(Comparator.comparingInt(Integer::parseInt))
                .orElse("");
    }

    /**
     * Follower-side check that the leader is still alive.
     *
     * If no leader heartbeat response has been recorded within the configured
     * threshold, the leader is presumed dead: its entry is removed from the peer list
     * and a re-election plus re-sharding round is started.
     *
     * NOTE(review): the dead leader is removed from {@code peersConfigList} but its
     * entry stays in {@code availableShardingInstances} — presumably harmless since
     * that map is only read via peerId lookups, but worth confirming.
     */
    private void doLeaderAliveCheck() {
        // Elapsed time since the last leader heartbeat exchange, compared against the
        // configured "leader down" threshold (seconds -> millis).
        if (System.currentTimeMillis() - recentlyFollowerToLeaderHeartbeatResponse.getReplyTimestamp()
                > tendConsistencyConfiguration.getJudgeLeaderDownSecondsThreshold() * 1000L) {
            String currentLeaderPeerId = consistencyTaskShardingContext.getCurrentLeaderPeerId();
            String peerAddress = availableShardingInstances.get(currentLeaderPeerId);
            peersConfigList.remove(peerAddress + ":" + currentLeaderPeerId);
            // Re-elect a leader and re-shard the tasks across the remaining peers.
            reShardTaskAndReElection();
        }
    }

    /**
     * Follower-side heartbeat: POSTs this node's peerId to the leader and records the
     * leader's response, whose timestamp doubles as the "leader is alive" marker read
     * by {@link #doLeaderAliveCheck()}.
     *
     * Fixes over the original:
     * - a response whose payload is null is ignored instead of being assigned to
     *   {@code recentlyFollowerToLeaderHeartbeatResponse} (which would NPE later in
     *   the leader-alive check);
     * - the exception is logged with its stack trace, not only its message.
     */
    private void sendFollowerHeartbeatRequest() {
        // Resolve the leader's ip:port and build the follower-heartbeat URL.
        String transportAddress = availableShardingInstances.get(consistencyTaskShardingContext.getCurrentLeaderPeerId());
        String url = String.format(PeerTransportEnum.FOLLOWER_HEARTBEAT_URL_TEMPLATE.getUrl(), transportAddress);
        RestTemplateUtils restTemplateUtils = new RestTemplateUtils();
        // Heartbeat payload: just this follower's peerId.
        FollowerToLeaderHeartbeatRequest request = FollowerToLeaderHeartbeatRequest.builder()
                .peerId(consistencyTaskShardingContext.getCurrentPeerId())
                .build();
        HttpEntity<FollowerToLeaderHeartbeatRequest> entity = new HttpEntity<>(request, null);
        try {
            ParameterizedTypeReference<CommonRes<FollowerToLeaderHeartbeatResponse>> parameterizedTypeReference =
                    new ParameterizedTypeReference<CommonRes<FollowerToLeaderHeartbeatResponse>>() {
                    };
            // Send the heartbeat.
            ResponseEntity<CommonRes<FollowerToLeaderHeartbeatResponse>> exchange = restTemplateUtils.exchange(url,
                    HttpMethod.POST, entity, parameterizedTypeReference);
            // Parse the result.
            if (HttpStatus.OK.value() == exchange.getStatusCode().value() && exchange.hasBody()) {
                CommonRes<FollowerToLeaderHeartbeatResponse> res = exchange.getBody();
                // Ignore empty bodies/payloads rather than storing a null response.
                if (res == null || res.getData() == null) {
                    return;
                }
                recentlyFollowerToLeaderHeartbeatResponse = res.getData();
                log.info("follower收到leader心跳响应为 {}", JSONUtil.toJsonStr(res));
            } else {
                log.info("follower收到leader心跳响应为 {}", exchange.getStatusCodeValue());
            }
        } catch (Exception e) {
            log.error("发送心跳异常: {}", e.getMessage(), e);
        }
    }

    /**
     * Builds the map of instances available for task sharding from the cluster
     * configuration entries.
     *
     * @param peersConfigList cluster entries in "ip:port:peerId" form;
     *                        resulting map format: key: peerId, value: ip:port
     */
    private void buildAvailableShardingInstances(List<String> peersConfigList) {
        for (String peerInfo : peersConfigList) {
            // parts[0] = ip, parts[1] = port, parts[2] = peerId.
            String[] parts = peerInfo.split(":");
            availableShardingInstances.put(parts[2], parts[0] + ":" + parts[1]);
        }
    }

    /**
     * Leader-side heartbeat fan-out: sends the heartbeat — which also carries the
     * current sharding result and its checksum — to every node except the leader
     * itself.
     *
     * @param peerInfoMap     cluster addresses, key: peerId, value: ip:port
     * @param currentLeaderId the current leader's peer id
     */
    private void sendHeartbeatTask(Map<String, String> peerInfoMap, String currentLeaderId) {
        log.info("进入发送心跳的定时调度任务");
        // Heartbeat payload: leader id + sharding result + checksum.
        LeaderToFollowerHeartbeatRequest heartbeatRequest = LeaderToFollowerHeartbeatRequest.builder()
                .currentLeaderId(currentLeaderId)
                .cacheSharingResult(consistencyTaskShardingContext.getTaskSharingResult())
                .checksum(consistencyTaskShardingContext.getChecksum())
                .build();
        RestTemplateUtils httpClient = new RestTemplateUtils();
        // Push the sharding context to every follower.
        for (Map.Entry<String, String> peerEntry : peerInfoMap.entrySet()) {
            if (peerEntry.getKey().equals(currentLeaderId)) {
                continue;
            }
            doSendHeartbeatTask(httpClient, peerEntry.getValue(), heartbeatRequest);
        }
    }

    /**
     * Sends one leader-to-follower heartbeat (with the sharding payload) and stores the
     * follower's reply in {@code heartbeatResponseTable}.
     *
     * Fix: the original relied on {@code assert res != null;} — Java assertions are
     * disabled by default at runtime, so a null body would NPE in production. The guard
     * is now an explicit null check; {@code Boolean.TRUE.equals(...)} additionally
     * protects against a null success flag, and a null payload is skipped.
     *
     * @param restTemplateUtils HTTP helper instance
     * @param url               the follower's ip:port
     * @param request           heartbeat request payload
     */
    private void doSendHeartbeatTask(RestTemplateUtils restTemplateUtils, String url, LeaderToFollowerHeartbeatRequest request) {
        HttpEntity<LeaderToFollowerHeartbeatRequest> entity = new HttpEntity<>(request, null);
        try {
            ParameterizedTypeReference<CommonRes<LeaderToFollowerHeartbeatResponse>> parameterizedTypeReference =
                    new ParameterizedTypeReference<CommonRes<LeaderToFollowerHeartbeatResponse>>() {
                    };
            // Build the leader-heartbeat URL for this follower.
            String leaderHeartbeatUrl = String.format(PeerTransportEnum.LEADER_HEARTBEAT_URL_TEMPLATE.getUrl(), url);
            // Send the heartbeat.
            ResponseEntity<CommonRes<LeaderToFollowerHeartbeatResponse>> exchange = restTemplateUtils.exchange(leaderHeartbeatUrl,
                    HttpMethod.POST, entity, parameterizedTypeReference);
            // Parse the result.
            if (HttpStatus.OK.value() == exchange.getStatusCode().value() && exchange.hasBody()) {
                CommonRes<LeaderToFollowerHeartbeatResponse> res = exchange.getBody();
                log.info("收到心跳响应为 {}", JSONUtil.toJsonStr(res));
                if (res != null && Boolean.TRUE.equals(res.getSuccess()) && res.getData() != null) {
                    LeaderToFollowerHeartbeatResponse response = res.getData();
                    heartbeatResponseTable.put(response.getResponsePeerId(), response);
                }
            } else {
                log.info("收到心跳响应为 {}", exchange.getStatusCodeValue());
            }
        } catch (Exception e) {
            log.error("发送心跳异常: {}", e.getMessage(), e);
        }
    }

    /**
     * Leader-side liveness check for followers (covers the case where a follower
     * process was killed without a graceful offline notification). A follower whose
     * last heartbeat acknowledgement is older than the configured threshold is removed
     * from the cluster and the tasks are re-sharded over the survivors.
     *
     * Fixes over the original:
     * - the stale entry is removed from {@code heartbeatResponseTable} (via the
     *   iterator, avoiding ConcurrentModificationException); the original left it in
     *   place, so the same dead follower re-triggered a re-shard on every check;
     * - re-sharding runs at most once per pass instead of once per dead follower.
     */
    private void doFollowerAliveCheck() {
        boolean membershipChanged = false;
        Iterator<Map.Entry<String, LeaderToFollowerHeartbeatResponse>> iterator =
                heartbeatResponseTable.entrySet().iterator();
        while (iterator.hasNext()) {
            Map.Entry<String, LeaderToFollowerHeartbeatResponse> entry = iterator.next();
            String peerId = entry.getKey();
            // Time since the follower last acknowledged a leader heartbeat, compared
            // against the configured "follower down" threshold (seconds -> millis).
            if (System.currentTimeMillis() - entry.getValue().getLastResponseTs() >=
                    tendConsistencyConfiguration.getJudgeFollowerDownSecondsThreshold() * 1000L) {
                log.info("检测到peerId={}的节点超过给定阈值内未与leader建立通信关系，leader重新规划任务分片", peerId);
                // ip:port of the dead follower.
                String peerAddress = availableShardingInstances.get(peerId);
                peersConfigList.remove(peerAddress + ":" + peerId);
                // Drop the stale heartbeat entry so the dead follower is not re-detected
                // on every subsequent check.
                iterator.remove();
                membershipChanged = true;
            }
        }
        if (membershipChanged) {
            // Re-shard over the surviving followers; the next leader heartbeat carries
            // the updated sharding assignment to them.
            consistencyTaskShardingHandler.doTaskSharding(peersConfigList);
        }
    }

    /**
     * Clears the cached sharding result and runs a fresh election + sharding round.
     * Invoked when the leader is presumed dead or announces it is going offline.
     */
    private void reShardTaskAndReElection() {
        clearShardingContext();
        startElectionProcessAndDoTaskSharding();
    }

    /**
     * Parses the comma-separated cluster address string from the configuration into a
     * mutable list, one "ip:port:peerId" entry per element.
     *
     * @param peersConfig raw configuration string
     * @return mutable list of cluster address entries
     */
    private List<String> parsePeersConfigToList(String peersConfig) {
        return new ArrayList<>(Arrays.asList(peersConfig.split(",")));
    }

    /**
     * Tells whether the elected leader is this very node by comparing this node's
     * "ip:port" with the address registered for the leader's peerId.
     *
     * @param leaderPeerId id of the elected leader
     * @param peerInfoMap  cluster node map, key: peerId, value: ip:port
     * @return true when this node is the leader
     */
    private boolean leaderIsMySelf(String leaderPeerId, Map<String, String> peerInfoMap) {
        String selfAddress = NetUtils.getCurrentPeerAddress() + ":" + this.currentServerPort;
        return selfAddress.equals(peerInfoMap.get(leaderPeerId));
    }

    /**
     * One execution-engine pass: runs the pending consistency tasks, logging (but not
     * propagating) any exception so the scheduler keeps running.
     */
    private void doStartTaskExecuteEngine() {
        try {
            taskScheduleManager.performanceTask();
        } catch (Exception e) {
            log.error("执行任务时，发生异常", e);
        }
    }

    /**
     * Event-bus handler fired when task sharding completes; stores the result (plus
     * its checksum) into the sharding context.
     *
     * @param taskShardingResult sharding result, key "ip:port:peerId" -> shard indexes
     */
    @Subscribe()
    public void onFinishTaskSharding(Map<String, List<Long>> taskShardingResult) {
        buildShardingContext(taskShardingResult);
    }

    /**
     * Event-bus handler for cluster membership changes (node online/offline).
     *
     * Branches:
     *  - ONLINE received while this node is the leader: register the new peer and
     *    re-shard;
     *  - OFFLINE of a follower: remove it from the list; if this node is the leader,
     *    also drop its heartbeat entry and re-shard;
     *  - OFFLINE of the leader received by a follower: remove the leader and start
     *    re-election + re-sharding.
     *
     * @param registerResponse node register/cancel response published on the event bus
     */
    @Subscribe
    public void onPeerGroupChanged(RegisterOrCancelResponse registerResponse) {
        log.info("收到集群成员变更的消息 内容为:{}", JSONUtil.toJsonStr(registerResponse));
        Integer opType = registerResponse.getRegisterOrCancelRequest().getOpType();
        RegisterOrCancelRequest registerOrCancelRequest = registerResponse.getRegisterOrCancelRequest();
        // Unique identity (ip:port:peerId) of the node that came online / went offline.
        String peerIdentify = getNewPeerIdentify(registerOrCancelRequest);
        // Online request handled by the leader node.
        if (PeerOpTypeEnum.ONLINE.getOpType().equals(opType) && registerResponse.isLeader()) {
            log.info("进入到新节点重新注册的流程");
            if (!peersConfigList.contains(peerIdentify)) {
                peersConfigList.add(peerIdentify);
                // Re-shard over the enlarged cluster.
                consistencyTaskShardingHandler.doTaskSharding(peersConfigList);
                log.info("新增节点[{}]完毕，已启动重新分片的流程", peerIdentify);
            }
            // Offline request where the departing node is a follower.
        } else if (PeerOpTypeEnum.OFFLINE.getOpType().equals(opType) && !registerResponse.getRegisterOrCancelRequest().isLeaderOffline()) {
            log.info("进入到follower节点下线的流程");
            if (peersConfigList.contains(peerIdentify)) {
                peersConfigList.remove(peerIdentify);
                // Only the node currently acting as leader cleans the heartbeat table
                // and triggers the re-shard.
                if (registerResponse.isLeader()) {
                    heartbeatResponseTable.remove(registerOrCancelRequest.getPeerId());
                    // Re-shard over the shrunken cluster.
                    consistencyTaskShardingHandler.doTaskSharding(peersConfigList);
                }
                log.info("删除节点[{}]完毕，已启动重新分片的流程", peerIdentify);
            }
            // Offline request where the departing node was the leader, received by a follower.
        } else if (PeerOpTypeEnum.OFFLINE.getOpType().equals(opType) && registerResponse.getRegisterOrCancelRequest().isLeaderOffline() && !registerResponse.isLeader()) {
            log.info("进入到leader节点下线 重新选举leader的流程");
            if (peersConfigList.contains(peerIdentify)) {
                peersConfigList.remove(peerIdentify);
                // Re-elect and re-shard.
                reShardTaskAndReElection();
                log.info("leader [{}] 下线完毕，已启动重新选举和重新分片的流程", peerIdentify);
            }
        }
    }

    /**
     * Builds a node's unique cluster identity ("ip:port:peerId") from a register/cancel
     * request.
     *
     * @param registerOrCancelRequest the register/cancel request
     * @return the node's unique identity string
     */
    private String getNewPeerIdentify(RegisterOrCancelRequest registerOrCancelRequest) {
        return String.join(":",
                registerOrCancelRequest.getIp(),
                registerOrCancelRequest.getPort(),
                registerOrCancelRequest.getPeerId());
    }

    /**
     * Returns the task-shard indexes assigned to this node; the sharding result is
     * keyed by "ip:port:peerId".
     *
     * Fix: during re-election {@link #clearShardingContext()} sets the sharding result
     * to null; reading it in that window used to throw an NPE. An empty list is
     * returned instead.
     *
     * @return this node's shard indexes; empty while no sharding result is available
     */
    public List<Long> getMyTaskShardIndexes() {
        readLock.lock();
        try {
            Map<String, List<Long>> taskSharingResult = consistencyTaskShardingContext.getTaskSharingResult();
            if (taskSharingResult == null) {
                return Collections.emptyList();
            }
            return taskSharingResult
                    .get(NetUtils.getCurrentPeerAddress() + ":" + this.currentServerPort + ":" + consistencyTaskShardingContext.getCurrentPeerId());
        } finally {
            readLock.unlock();
        }
    }

    /**
     * Returns the sharding context.
     *
     * NOTE(review): the read lock only guards the instant the reference is read; the
     * returned object escapes the lock, so callers observe concurrent mutations made
     * through the write-locked setters — confirm callers tolerate that.
     *
     * @return the sharding context
     */
    public ConsistencyTaskShardingContext getConsistencyTaskShardingContext() {
        readLock.lock();
        try {
            return consistencyTaskShardingContext;
        } finally {
            readLock.unlock();
        }
    }

    /**
     * Applies the sharding payload of a leader heartbeat to the local sharding
     * context; used on the follower side when the leader pushes its sharding result.
     *
     * @param leaderToFollowerHeartbeatRequest heartbeat request sent by the leader
     */
    public void setConsistencyTaskShardingContext(LeaderToFollowerHeartbeatRequest leaderToFollowerHeartbeatRequest) {
        writeLock.lock();
        try {
            // Store the pushed sharding result together with its checksum.
            consistencyTaskShardingContext.setTaskSharingResult(leaderToFollowerHeartbeatRequest.getCacheSharingResult());
            consistencyTaskShardingContext.setChecksum(leaderToFollowerHeartbeatRequest.getChecksum());
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Stores a freshly computed sharding result in the context together with the MD5
     * checksum of its JSON form.
     *
     * @param taskShardingResult sharding result, key "ip:port:peerId" -> shard indexes
     */
    private void buildShardingContext(Map<String, List<Long>> taskShardingResult) {
        writeLock.lock();
        try {
            String checksum = SecureUtil.md5(JSONUtil.toJsonStr(taskShardingResult));
            consistencyTaskShardingContext.setTaskSharingResult(taskShardingResult);
            consistencyTaskShardingContext.setChecksum(checksum);
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Drops the cached sharding result from the context (run before re-election).
     *
     * Fix: this mutates the context, so it must hold the WRITE lock. The original took
     * the read lock, which does not exclude the write-locked mutators
     * (buildShardingContext / setConsistencyTaskShardingContext) running concurrently.
     */
    private void clearShardingContext() {
        writeLock.lock();
        try {
            consistencyTaskShardingContext.setTaskSharingResult(null);
        } finally {
            writeLock.unlock();
        }
    }

    /**
     * Sleeps for a random 2000–3999 ms interval before the election so that nodes
     * started simultaneously do not all claim leadership at once.
     *
     * Fix: {@link InterruptedException} is now caught specifically and the thread's
     * interrupt status restored; the original swallowed it via a broad catch, hiding
     * the cancellation from callers.
     */
    private void randomSleep() {
        int sleepTime = RANDOM.nextInt(2000) + 2000;
        log.info("随机休眠时间为 {}", sleepTime);
        try {
            Thread.sleep(sleepTime);
        } catch (InterruptedException e) {
            // Preserve the interrupt status so callers can observe the cancellation.
            Thread.currentThread().interrupt();
            log.error("随机休眠时，发生异常", e);
        }
    }
}
