import logging

# Root-logger setup is done before the imports below so that any loggers
# created by those modules at import time already see this format/level.
# NOTE(review): this only matters if an imported module configures logging
# itself -- confirm before moving these imports above basicConfig.
logging.basicConfig(
    format="%(asctime)s,%(msecs)03d-[%(levelname)s][%(process)d][%(threadName)s][%(name)s:%(lineno)s]: %(message)s",
    datefmt="%Y-%m-%d %H:%M:%S",
    level=logging.INFO,
)

import util
import time
import discover
import queue
import threading

from pysyncobj import SyncObj, replicated, SyncObjConf


# Module-level logger for this file.
logger = logging.getLogger(__name__)


class DynamicMemberSyncObj(SyncObj):
    """A pysyncobj Raft node whose cluster membership follows SSDP discovery.

    On startup the node runs SSDP discovery for a few seconds and forms the
    cluster from the discovered peers.  Afterwards every SSDP membership
    change is translated into pysyncobj add/remove-node operations (performed
    by the leader via two worker threads) or into a forced configuration
    reset (performed by followers, or when reconciliation is needed).
    """

    # TCP port every cluster node uses for Raft traffic.
    PORT = 4321
    # pysyncobj internal state code -> human-readable name.
    NODE_STATUS = {0: "FOLLOWER", 1: "CANDIDATE", 2: "LEADER"}

    def __init__(self):

        logger.info("--------------------------------------------------")
        # Replicated counter value; only mutated through incCounter().
        self.__data = 0
        self.event_ready = threading.Event()

        # Start SSDP first and give it time to collect peer information.
        self.ssdp = discover.SsdpDevice()
        self.ssdp.start()
        time.sleep(5)

        ssdp_cache = discover.SsdpDevice.SSDP_EXPIRE

        # Form the cluster from the peers SSDP has discovered so far.
        self_node = f"{util.get_local_ip()}:{self.PORT}"
        other_nodes = [f"{n}:{self.PORT}" for n in self.ssdp.peers.get_eles()]
        logger.info(f"----- init with other_nodes: {sorted(other_nodes)}")
        super().__init__(
            self_node,
            other_nodes,
            # https://pysyncobj.readthedocs.io/en/latest/pysyncobj.html#syncobjconf
            conf=SyncObjConf(
                dynamicMembershipChange=True,
                # journalFile="./journal.txt",
                onReady=lambda: self.event_ready.set(),
                onStateChanged=self.__on_node_status_changed,
                # The leader steps down to follower when a majority of
                # followers fail to respond within this many seconds.
                leaderFallbackTimeout=10,
                connectionTimeout=5,
                # If the raft heartbeat timeout were shorter than the SSDP
                # cache lifetime, a follower would notice an unreachable
                # leader before SSDP drops it, leaving a dead node in the
                # configuration and possibly stalling the election.  Keeping
                # the raft timeouts slightly above the SSDP cache time lets
                # SSDP update the configuration first, so the follower later
                # starts the election against the *new* configuration.
                # Too large a value slows elections down.
                raftMinTimeout=ssdp_cache + 1.2,
                raftMaxTimeout=ssdp_cache + 1.6,
            ),
        )
        self.__wait_for_ready()  # block until a leader is elected and data is synced
        self.log("-------------------------- syncobj ready")

        # SSDP peers may have changed while we were waiting, which would leave
        # the cluster configuration out of sync with SSDP; reconcile once,
        # then track further changes through the callback.
        self.apply_ssdp_nodes()
        self.ssdp.member_changed = self.__on_cluster_member_changed

    def apply_ssdp_nodes(self):
        """Reconcile the cluster configuration with the current SSDP peers.

        If the two views differ, the SSDP view wins and the cluster
        configuration is force-reset to it.

        Returns:
            The set of node IPs that were in the cluster configuration but
            are no longer visible via SSDP.
        """
        ssdp_nodes = self.ssdp.peers.get_eles()
        cluster_nodes = {n.ip for n in self.otherNodes}
        if ssdp_nodes != cluster_nodes:
            new_conf_nodes = [f"{n}:{self.PORT}" for n in ssdp_nodes]
            # Deliberate call into a pysyncobj-private API to force-reset
            # the configuration without going through the raft log.
            self._SyncObj__updateClusterConfiguration(new_conf_nodes)
            logger.info(
                f"reset cluster configuration with ssdp nodes: {new_conf_nodes}  ---> {self.cluster_nodes}"
            )
        return cluster_nodes - ssdp_nodes

    def log(self, msg):
        """Log *msg* prefixed with this node's state, term, indexes and leader IP."""
        status = self.NODE_STATUS[self.getStatus()["state"]]
        leader_ip = self._getLeader().ip if self._getLeader() else None
        logger.info(
            f"{status}-{self.raftCurrentTerm}-{self.raftCommitIndex}-{self.raftLastApplied}({leader_ip}): {msg}"
        )

    def __on_cluster_member_changed(self, added_nodes, removed_nodes):
        """SSDP peer-change callback.

        The leader queues changed nodes for the worker threads (or
        force-resets the configuration when too few nodes remain); a
        follower only force-resets its local configuration on removals.
        """

        self.log("----------------------------------- ssdp peers changed")
        self.log(f"--------      added_nodes: {added_nodes}")
        self.log(f"--------      removed_nodes: {removed_nodes}")

        # Only the leader may perform membership changes.
        if self._isLeader():
            # Queue the added nodes; the worker thread processes them one by one.
            for added in added_nodes or []:
                self.new_nodes.put(f"{added}:{self.PORT}")

            if removed_nodes:
                # If removing removed_nodes would leave only the leader,
                # skip the one-by-one removal and reset to a single-node cluster.
                cluster_nodes = {n.ip for n in self.otherNodes}
                remain_nodes = cluster_nodes - removed_nodes
                if not remain_nodes:
                    self.log("only leader left, reset cluster configuration")
                    self._SyncObj__updateClusterConfiguration([])
                else:
                    # A node removal also appends a log entry that must be
                    # replicated to a majority; if fewer than a majority of
                    # nodes remain that entry can never commit and the
                    # leader would fall back, so force-reset instead.
                    self.log(f"remain_nodes: {remain_nodes}")
                    if len(remain_nodes) < (len(cluster_nodes) + 1) // 2:
                        self.log(
                            "Only a small part of nodes left, reset cluster configuration"
                        )
                        self._SyncObj__updateClusterConfiguration(
                            [f"{n}:{self.PORT}" for n in remain_nodes]
                        )
                for removed in removed_nodes or []:
                    self.remove_nodes.put(f"{removed}:{self.PORT}")
        else:
            # A follower that ends up alone degrades the cluster to a
            # single-node cluster and will become its own leader.
            if removed_nodes:
                cluster_nodes = {n.ip for n in self.otherNodes}
                remain_nodes = cluster_nodes - removed_nodes
                new_nodes = [f"{n}:{self.PORT}" for n in remain_nodes]
                self._SyncObj__updateClusterConfiguration(new_nodes)
                self.log(f"update cluster configuration: {new_nodes}")

    @replicated
    def incCounter(self):
        """Increment the replicated counter.

        The call only appends the operation to the raft log; the value is
        actually changed once a majority of nodes acknowledge the entry and
        the leader commits it.
        """
        self.__data += 1

    def getCounter(self):
        """Return the locally applied counter value."""
        return self.__data

    def _SyncObj__onMessageReceived(self, node, message):
        # Override of a pysyncobj-private hook: trace every inbound message.
        self.log(f"received message from {node.ip}: {message}")
        super()._SyncObj__onMessageReceived(node, message)

    def __on_node_status_changed(self, old_state, new_state):
        """Raft node state-change callback (onStateChanged)."""

        self.log(
            f"state changed: {self.NODE_STATUS[old_state]} -> {self.NODE_STATUS[new_state]}, cluster nodes: {sorted(self.cluster_nodes)}"
        )

        # Possible transitions:
        #   - Follower --> Candidate
        #       leader heartbeat timed out; start a new election
        #   - Candidate --> Follower
        #       election failed
        #   - Candidate --> Candidate
        #       election failed
        #   - Candidate --> Leader
        #       election won
        #   - Leader --> Follower
        #       fallback: no majority response in time (some followers
        #       unreachable), or an AppendEntries arrived from another
        #       leader whose term is not lower than ours

        # On becoming leader, start the two queue-consumer threads; their
        # loops exit automatically once this node is no longer the leader.
        if new_state == 2:
            self.log(
                f"become new leader, cluster other nodes: {sorted(self.cluster_nodes)}"
            )

            self.new_nodes = queue.Queue()  # nodes waiting to join the cluster
            self.remove_nodes = queue.Queue()  # nodes waiting to be removed

            threading.Thread(
                name="worker_add_node", target=self.__consume_new_nodes_queue
            ).start()
            threading.Thread(
                name="worker_remove_node", target=self.__consume_remove_nodes_queue
            ).start()

            # The election succeeded, but the configuration may still list
            # offline nodes; reconcile with SSDP and queue the stale ones.
            # Addresses are queued as "ip:port" so the consumer's membership
            # check against cluster_nodes matches.
            removed_nodes = self.apply_ssdp_nodes()
            for n in removed_nodes:
                self.remove_nodes.put(f"{n}:{self.PORT}")

        if old_state == 2 and new_state == 0:
            self.log("leader down")
            removed_nodes = self.apply_ssdp_nodes()
            for n in removed_nodes:
                self.removeNodeFromCluster(f"{n}:{self.PORT}")

    def __consume_new_nodes_queue(self):
        """Worker loop (leader only): add queued nodes to the cluster one by one."""

        # Signals that the current membership change finished -- finished
        # only, not necessarily succeeded.
        change_member_done = threading.Event()

        def callback_wrapper(node_addr):
            """Build the pysyncobj completion callback for one add operation."""

            def callback(*args):
                change_member_done.set()
                # args is (result, error); error code 0 means success.
                if args[1] == 0:
                    self.log(
                        f"add node success: {node_addr}, cluster nodes: {self.cluster_nodes}"
                    )
                else:
                    self.log(f"add node('{node_addr}') failed: {args[1]},")

            return callback

        while self._isLeader():
            try:
                node_addr = self.new_nodes.get(timeout=0.5)
            except queue.Empty:
                continue

            change_member_done.clear()
            # Re-check leadership: the state may have changed while we were
            # blocked in queue.get().
            if self._isLeader() and node_addr not in self.cluster_nodes:
                self.log(
                    f"-------- before addNodeToCluster called - {node_addr}, cluster nodes:{sorted(self.cluster_nodes)}"
                )
                self.addNodeToCluster(node_addr, callback=callback_wrapper(node_addr))
                self.log(
                    f"-------- after addNodeToCluster called - {node_addr}, cluster nodes:{sorted(self.cluster_nodes)}"
                )
                # addNodeToCluster is asynchronous; the docs require one
                # membership change to finish before the next is issued
                # (removeNodeFromCluster likewise).
                change_member_done.wait()

        self.log("__consume_new_nodes_queue stop")

    def __consume_remove_nodes_queue(self):
        """Worker loop (leader only): remove queued nodes from the cluster one by one."""

        change_member_done = threading.Event()

        def callback_wrapper(node_addr):
            """Build the pysyncobj completion callback for one remove operation."""

            def callback(*args):
                change_member_done.set()
                # args is (result, error); error code 0 means success.
                if args[1] == 0:
                    self.log(
                        f"remove node success: {node_addr}, cluster nodes: {self.cluster_nodes}"
                    )
                else:
                    self.log(f"remove node('{node_addr}') failed: {args[1]},")

            return callback

        while self._isLeader():
            try:
                node_addr = self.remove_nodes.get(timeout=0.5)
            except queue.Empty:
                continue
            # Log outside the try so a logging failure cannot be mistaken
            # for an empty queue and silently drop the dequeued node.
            self.log(f"begin remove node: {node_addr}")

            change_member_done.clear()
            # Re-check leadership: the state may have changed while we were
            # blocked in queue.get().
            if self._isLeader() and node_addr in self.cluster_nodes:
                self.removeNodeFromCluster(
                    node_addr, callback=callback_wrapper(node_addr)
                )
                self.log("-------- removeNodeFromCluster called")
                change_member_done.wait()

        self.log("__consume_remove_nodes_queue stop")

    @property
    def cluster_nodes(self):
        """Set of "ip:port" addresses of the other nodes in the configuration."""
        return {n.address for n in self.otherNodes}

    def __wait_for_ready(self):
        """Block until pysyncobj signals readiness (onReady fired)."""
        self.event_ready.wait()


# Module-level node instance; constructing it blocks until the cluster is
# ready (SSDP discovery + leader election + log sync).
xx = DynamicMemberSyncObj()
# print("+++++++++++++++++++++++++++++++++")
# xx.addNodeToCluster("192.168.0.222:4321")


def do_inc():
    """Increment the replicated counter ten times, logging after each call."""
    remaining = 10
    while remaining > 0:
        xx.incCounter()
        xx.log(f"----------------------------- after add: {xx.getCounter()}")
        time.sleep(0.2)
        remaining -= 1


# Kick off the incrementing worker, then keep the main thread alive,
# emitting a status line and the current counter value once a second.
threading.Thread(target=do_inc).start()

while True:
    xx.log("")
    current_count = xx.getCounter()
    xx.log(f"{current_count}")
    time.sleep(1)