import time
import threading
import concurrent.futures
from enum import Enum
from dataclasses import dataclass, field

from motor.utils.logger import get_logger
from motor.utils.singleton import ThreadSafeSingleton
from motor.config.controller import ControllerConfig
from motor.resources.instance import Instance, NodeManagerInfo
from motor.controller.core.observer import Observer, ObserverEvent
from motor.controller.core.instance_manager import InstanceManager, InsStatus
from motor.controller.ft.cluster_grpc import cluster_fault_pb2
from motor.controller.ft.cluster_grpc.cluster_grpc_client import ClusterNodeClient
from motor.controller.ft.strategy.strategy import StrategyBase, generate_strategy_map


logger = get_logger(__name__)


class Status(int, Enum):
    """Health status of a server or an instance.

    Int-valued so that statuses can be compared/serialized numerically;
    higher values indicate worse health.
    """
    HEALTHY = 0      # fully operational
    SUB_HEALTHY = 1  # degraded but still serving
    UNHEALTHY = 2    # faulty; must be separated / recovered

@dataclass
class DeviceFaultInfo:
    """A single device fault reported for one server.

    Fault levels are compared as strings ("L1" < "L2" < ... < "L6"),
    which is valid only while levels stay single-digit.
    """
    device_type: str # npu, switch, node, PSU, disk......
    rank_id: int # only npu has rank_id, others use -1.
    fault_code: int = 0x0  # vendor/system fault code; 0x0 means none
    fault_level: str = "L1" # L1, L2, L3, L4, L5, L6
    fault_type: str | None = None    # optional classification of the fault
    fault_reason: str | None = None  # optional human-readable cause


@dataclass
class ServerMetadata:
    """
    Each server metadata represents a server in the cluster.
    An instance may have multiple servers.

    We don't determine the server's status ourselves, we just use the
    broadcast message to update the server's status.  And the
    `device_fault_infos` list is used to record the device faults
    of the server; if there is no device fault, it will be an
    empty list.
    """
    pod_ip: str   # pod IP, used as the unique key in FaultManager._servers
    host_ip: str  # IP of the physical host running the pod
    status: Status = Status.HEALTHY
    device_fault_infos: list[DeviceFaultInfo] = field(default_factory=list)


@dataclass
class InstanceMetadata:
    """Per-instance health state and the currently running recovery strategy.

    `lock` guards the mutable fields of this object; FaultManager._lock only
    guards membership in the instance/server/group dictionaries.
    """
    instance_id: int
    status: Status = Status.HEALTHY
    node_managers: list[NodeManagerInfo] = field(default_factory=list)
    lock: threading.Lock = field(default_factory=threading.Lock)

    # When an instance's servers are faulty, we need to trigger
    # the recovery function, we record the current strategy,
    # strategy level and fault code. if the instance is healthy,
    # we should try to stop the strategy.
    fault_level: str = "L0" # current instance fault level, L0 means healthy, L1+ means faulty.
    fault_code: int = 0x0 # fault code that trigger the current strategy
    strategy: StrategyBase | None = None # current handling strategy

    
@dataclass
class InstanceGroupMetadata:
    """
    Instance group metadata for scale prefill to decode recovery.
    It is different from the instance group metadata in the instance
    assembler, because it interacts with the instance manager, which
    manages instances by instance id, so it uses instance ids instead
    of job names.
    """
    id: int
    p_ids: list[int] = field(default_factory=list)  # prefill instance ids
    d_ids: list[int] = field(default_factory=list)  # decode instance ids


class FaultManager(ThreadSafeSingleton, Observer):
    """Track cluster server/instance health and drive fault-tolerance strategies.

    The manager:
      * subscribes to cluster fault broadcast messages over gRPC and mirrors
        the reported per-server status and device faults,
      * derives each instance's fault level / fault code from its servers'
        device faults,
      * periodically starts, upgrades, or resets recovery strategies per
        instance in a background thread, and
      * observes the InstanceManager (Observer interface) to keep its
        metadata in sync with instance lifecycle events.

    Locking: `self._lock` guards the `_servers` / `_instances` / `_groups`
    dictionaries; each InstanceMetadata additionally carries its own lock
    for its mutable fields. Never hold `self._lock` while taking an
    instance lock for long-running work.
    """

    def __init__(self) -> None:
        # If the fault manager is already initialized, return.
        # (ThreadSafeSingleton hands back the same object, so __init__ may
        # be invoked more than once.)
        if hasattr(self, '_initialized'):
            return

        # Manage all servers's status with pod_ip, when it comes a faulty server,
        # we firstly find out which instance this server belongs to,
        # and then use self.instances to find out all nodes in this instance.
        self._servers: dict[str, ServerMetadata] = {}
        self._instances: dict[int, InstanceMetadata] = {}
        # For scale prefill to decode recovery
        self._groups: dict[int, InstanceGroupMetadata] = {}
        self._lock = threading.Lock()

        self._client = ClusterNodeClient('localhost', 5005)

        self._stop_event = threading.Event()

        # For dual handle function trigger, we use a thread pool executor to handle it.
        self._executor = concurrent.futures.ThreadPoolExecutor(max_workers=5)

        self._strategies = generate_strategy_map()

        self._server_status_subscriber_thread = threading.Thread(
            target=self._server_status_subscriber,
            daemon=True,
            name="ServerStatusSubscriber"
        )
        self._ft_strategy_center_thread = threading.Thread(
            target=self._ft_strategy_center,
            daemon=True,
            name="FaultToleranceStrategyCenter"
        )
        self._server_status_subscriber_thread.start()
        self._ft_strategy_center_thread.start()

        self._initialized = True
        logger.info("FaultManager started.")

    def _server_status_subscriber(self) -> None:
        """Background loop: register with the cluster fault service and subscribe.

        Retries registration and subscription with exponential backoff
        (capped at `max_wait_time`); the attempt counter is reset after a
        long cool-down so the loop never gives up permanently.
        """
        reconnect_attempts, max_reconnect_attempts = 0, 10
        base_wait_time, max_wait_time = 5, 300

        while not self._stop_event.is_set():
            try:
                # First, ensure we are registered
                if not self._client.is_registered():
                    if not self._client.register():
                        reconnect_attempts += 1
                        if reconnect_attempts <= max_reconnect_attempts:
                            wait_time = min(base_wait_time * (2 ** (reconnect_attempts - 1)), max_wait_time)
                            logger.warning("client register failed, attempt %d/%d, retrying in %ds...",
                                            reconnect_attempts, max_reconnect_attempts, wait_time)
                            time.sleep(wait_time)
                            continue
                        else:
                            logger.error("client register failed after %d attempts, giving up for now! "
                                         "will retry later!", max_reconnect_attempts)
                            time.sleep(max_wait_time)
                            reconnect_attempts = 0  # reset counter, avoid infinite waiting
                            continue
                    else:
                        reconnect_attempts = 0
                        logger.info("client register successful")

                # Once registered, start subscription (this will block until connection is lost)
                logger.info("starting fault message subscription...")
                self._client.subscribe_fault_messages(self._process_cluster_fault_message)

                # If we reach here, the subscription ended (likely due to connection loss)
                logger.warning("fault message subscription ended, will retry...")

            except Exception as e:
                reconnect_attempts += 1
                logger.error("hardware info subscriber error: %s", e)
                self._client.close()

                if reconnect_attempts <= max_reconnect_attempts:
                    wait_time = min(base_wait_time * (2 ** (reconnect_attempts - 1)), max_wait_time)
                    logger.warning("subscriber error, attempt %d/%d, retrying in %ds...",
                                   reconnect_attempts, max_reconnect_attempts, wait_time)
                    time.sleep(wait_time)
                else:
                    logger.error("subscriber failed after %d attempts, "
                                 "giving up for now. Will retry later.", max_reconnect_attempts)
                    time.sleep(max_wait_time)
                    reconnect_attempts = 0  # reset counter, avoid infinite waiting

    def _process_cluster_fault_message(self, fault_msg: cluster_fault_pb2.FaultMsgSignal):
        """Apply one broadcast fault message to the mirrored server statuses.

        Updates each known server's status and device-fault list, separates
        instances that contain newly-unhealthy servers, and re-derives every
        instance's fault level. Unknown servers and malformed entries are
        logged and skipped; this callback must never raise into the gRPC
        subscription loop.
        """
        try:
            # check if the fault message is valid
            if fault_msg is None:
                logger.error("Received None fault message")
                return
            if not hasattr(fault_msg, 'signalType'):
                logger.error("Fault message missing signalType attribute")
                return
            if fault_msg.signalType == "normal":
                logger.info("read fault message signalType is : %s.", fault_msg.signalType)
                return
            if not hasattr(fault_msg, 'nodeFaultInfo') or fault_msg.nodeFaultInfo is None:
                logger.warning("No nodeFaultInfo in fault message")
                return

            unhealthy_node_ips = [] # record unhealthy node ip to update status
            with self._lock:
                for node_info in fault_msg.nodeFaultInfo:
                    try:
                        if not hasattr(node_info, 'nodeIP') or not hasattr(node_info, 'faultLevel'):
                            logger.warning("Invalid node_info structure, missing required fields")
                            continue

                        node_ip = getattr(node_info, 'nodeIP', None)
                        fault_level = getattr(node_info, 'faultLevel', None)
                        if node_ip is None or fault_level is None:
                            logger.warning("Missing required node_info fields (nodeIP or faultLevel)")
                            continue

                        logger.info("Get node fault level: %s, ip: %s.", fault_level, node_ip)

                        server_metadata = self._servers.get(node_ip)
                        if server_metadata is None:
                            logger.warning("Unknown server %s, skipping fault message processing.", node_ip)
                            continue

                        try:
                            if hasattr(node_info, 'faultDevice') and node_info.faultDevice is not None:
                                # NOTE(review): the raw proto entries are stored as-is;
                                # this assumes they expose fault_level/fault_code like
                                # DeviceFaultInfo — confirm against the proto definition.
                                device_faults = list(node_info.faultDevice)
                                if len(device_faults) > 1000:
                                    logger.warning("Too many device faults (%d), truncating to 1000!",
                                                   len(device_faults))
                                    device_faults = device_faults[:1000]
                                server_metadata.device_fault_infos = device_faults
                            else:
                                server_metadata.device_fault_infos = []
                        except Exception as e:
                            logger.error("Error processing device fault info for %s: %s", node_ip, e)
                            server_metadata.device_fault_infos = []

                        if fault_level == "unhealthy":
                            server_metadata.status = Status.UNHEALTHY
                            unhealthy_node_ips.append(node_ip)
                        elif fault_level == "healthy":
                            server_metadata.status = Status.HEALTHY
                        else:
                            logger.warning("Unknown fault level: %s for node %s", fault_level, node_ip)

                    except Exception as e:
                        logger.error("Error processing node_info: %s", e)
                        continue

            self._separate_unhealthy_instances(unhealthy_node_ips)
            # update instances status by server status and device fault info
            self._update_instances_status()

        except Exception as e:
            logger.error("Critical error in process_cluster_fault_message: %s", e)

    def _separate_unhealthy_instances(self, unhealthy_node_ips: list[str]) -> None:
        """Mark the instance of each unhealthy node INACTIVE and notify observers."""
        # Notify instance manager to separate the unhealthy instances
        for node_ip in unhealthy_node_ips:
            try:
                instance = InstanceManager().get_instance_by_podip(node_ip)
                if instance is not None:
                    instance.update_instance_status(InsStatus.INACTIVE)
                    InstanceManager().notify(instance, ObserverEvent.INSTANCE_REMOVED)
                    logger.info("Successfully updated instance status for node %s", node_ip)
                else:
                    logger.warning("No instance found for node %s", node_ip)
            except Exception as e:
                logger.error("Error updating instance status for node %s: %s", node_ip, e)

    def _update_instances_status(self) -> None:
        """Re-derive every instance's fault level/code from its servers' faults."""
        with self._lock:
            ins_ids = list(self._instances.keys())

        for ins_id in ins_ids:
            with self._lock:
                # The instance may have been removed concurrently after the
                # snapshot above; skip it instead of raising KeyError.
                ins_metadata = self._instances.get(ins_id)
            if ins_metadata is None:
                continue

            with ins_metadata.lock:
                # Use the server's highest level device fault to represent
                # the instance's fault level, if the instance is healthy,
                # the fault level will be L0, and the fault code will be 0x0.
                final_level, fault_code = "L0", 0x0
                for node_mgr in ins_metadata.node_managers:
                    try:
                        device_fault_info = self._eval_server_status(node_mgr.pod_ip)
                    except ValueError as e:
                        # One unknown server must not abort the evaluation of
                        # the remaining nodes and instances.
                        logger.warning("Skip server status evaluation: %s", e)
                        continue
                    if device_fault_info is not None:
                        # String compare is valid while levels stay "L1".."L6".
                        final_level = max(final_level, device_fault_info.fault_level)
                        fault_code = max(fault_code, device_fault_info.fault_code)

                ins_metadata.fault_level = final_level
                ins_metadata.fault_code = fault_code

    def _eval_server_status(self, pod_ip: str) -> DeviceFaultInfo | None:
        """Return the highest-level device fault of a server, or None if healthy.

        Raises:
            ValueError: if `pod_ip` is not a known server.
        """
        with self._lock:
            server_metadata = self._servers.get(pod_ip)

        if server_metadata is None:
            raise ValueError(f"Server {pod_ip} not found.")

        # use the devices' highest level error to represent the server's fault level
        if (
            server_metadata.status == Status.HEALTHY
            or len(server_metadata.device_fault_infos) == 0
        ):
            return None

        # NOTE(review): the strict '>' below means a server whose faults are
        # all exactly "L1" yields None (no fault reported) — confirm this is
        # intentional (L1 treated as too minor to act on).
        highest_fault_level = "L1"
        target_fault_info = None
        for fault_info in server_metadata.device_fault_infos:
            if fault_info.fault_level > highest_fault_level:
                highest_fault_level = fault_info.fault_level
                target_fault_info = fault_info

        return target_fault_info

    def _ft_strategy_center(self) -> None:
        """Background loop: periodically drive each instance's recovery strategy."""
        while not self._stop_event.is_set():
            instance_ids = []
            with self._lock:
                instance_ids = list(self._instances.keys())

            for instance_id in instance_ids:
                try:
                    self._process_instance_strategy(instance_id)
                except Exception as e:
                    # A failure for one instance must not kill the whole
                    # strategy-center thread.
                    logger.error("Error processing strategy for instance %d: %s",
                                 instance_id, e)

            time.sleep(ControllerConfig.strategy_center_check_internal)

    def _process_instance_strategy(self, ins_id: int) -> None:
        """
        This function will generate the instance's strategy base on the instance's
        fault level and fault code. If the current strategy is not None, it will
        check if the new strategy is the same as the current strategy. Below are
        the rules:

        1.SAME_LEVEL: check if the current strategy is finished, if it is finished,
                      it will reset the relative state.
        2.DIFFERENT_AND_UPGRADE: stop the current strategy and start the new strategy.
        3.DIFFERENT_AND_DOWNGRADE: do nothing.
        """
        with self._lock:
            ins_metadata = self._instances.get(ins_id)
            if ins_metadata is None:
                return

        with ins_metadata.lock:
            fault_level, fault_code = ins_metadata.fault_level, ins_metadata.fault_code
            # The strategy map entry selects a strategy class for this fault
            # code (presumably returning None when no action is needed, e.g.
            # for "L0" — verify against generate_strategy_map).
            new_strategy_cls = self._strategies[fault_level](fault_code, ins_id)
            current_strategy = ins_metadata.strategy
            current_cls = current_strategy.__class__ if current_strategy is not None else None

            if new_strategy_cls is not None:
                is_upgrade = False
                if current_strategy is None:
                    is_upgrade = True
                else:
                    if new_strategy_cls != current_cls:
                        current_strategy.stop()
                        ins_metadata.strategy = None
                        is_upgrade = True

                if is_upgrade:
                    new_strategy = new_strategy_cls()
                    self._executor.submit(new_strategy.execute, ins_id)
                    ins_metadata.strategy = new_strategy
                    logger.info("Set new strategy for instance %d to %s with fault code %d.",
                                ins_id, fault_level, fault_code)

            if ins_metadata.strategy is not None:
                if ins_metadata.strategy.is_finished():
                    ins_metadata.strategy = None
                    ins_metadata.fault_level = "L0"
                    ins_metadata.fault_code = 0x0
                    logger.info("Strategy for instance %d finished, reset state.", ins_id)
                else:
                    # New strategy and have unfinished strategy will both reach here.
                    ins_metadata.fault_level = fault_level
                    ins_metadata.fault_code = fault_code

    def update(self, ins: Instance, event: ObserverEvent) -> None:
        """Observer callback: sync metadata with instance lifecycle events.

        Raises:
            ValueError: for events this manager does not handle.
        """
        logger.info("FaultManager update instance %s with event: %s.", ins.job_name, event)

        if event == ObserverEvent.INSTANCE_ADDED:
            self._handle_instance_added(ins)
        elif event == ObserverEvent.INSTANCE_SEPERATED:
            self._handle_instance_separated(ins)
        elif event == ObserverEvent.INSTANCE_REMOVED:
            self._handle_instance_removed(ins)
        else:
            raise ValueError(f"Invalid event: {event}.")

    def _handle_instance_added(self, ins: Instance) -> None:
        """Register a new instance, its servers, and its P/D group membership."""
        ins_metadata = InstanceMetadata(
            instance_id=ins.id,
            node_managers=ins.get_node_managers()
        )

        server_metadatas = {}
        for node_mgr in ins_metadata.node_managers:
            server_metadatas[node_mgr.pod_ip] = ServerMetadata(
                pod_ip=node_mgr.pod_ip,
                host_ip=node_mgr.host_ip,
            )

        with self._lock:
            self._instances[ins.id] = ins_metadata
            self._servers.update(server_metadatas)

            if ins.group_id not in self._groups:
                group = InstanceGroupMetadata(id=ins.group_id)
                self._groups[ins.group_id] = group
            else:
                group = self._groups[ins.group_id]

            # Anything that is not a prefill instance is tracked as decode.
            if ins.role == "prefill":
                group.p_ids.append(ins.id)
            else:
                group.d_ids.append(ins.id)

    def _handle_instance_separated(self, ins: Instance) -> None:
        """Mark a separated instance as unhealthy (no-op if it is unknown)."""
        # fix: ins_metadata was previously unbound (NameError) when ins.id
        # was not present in self._instances.
        ins_metadata = None
        with self._lock:
            if ins.id in self._instances:
                ins_metadata = self._instances[ins.id]

        if ins_metadata is not None:
            with ins_metadata.lock:
                ins_metadata.status = Status.UNHEALTHY

    def _handle_instance_removed(self, ins: Instance) -> None:
        """Drop an instance, its servers, and its group membership."""
        # Hold the lock for the whole removal so no other thread can observe
        # a half-removed instance between lookup and cleanup.
        with self._lock:
            ins_metadata = self._instances.get(ins.id)
            if ins_metadata is None:
                return

            for node_mgr in ins_metadata.node_managers:
                self._servers.pop(node_mgr.pod_ip, None)

            if ins.group_id in self._groups:
                group = self._groups[ins.group_id]
                # Mirror _handle_instance_added: every non-prefill role was
                # recorded in d_ids, so remove it from there regardless of
                # the exact role string (previously only "decode" was removed,
                # leaking ids for other roles).
                if ins.role == "prefill":
                    if ins.id in group.p_ids:
                        group.p_ids.remove(ins.id)
                elif ins.id in group.d_ids:
                    group.d_ids.remove(ins.id)
                if len(group.p_ids) == 0 and len(group.d_ids) == 0:
                    self._groups.pop(ins.group_id)

            self._instances.pop(ins.id, None)

    def stop(self) -> None:
        """Stop the background threads and release the strategy executor."""
        self._stop_event.set()
        self._ft_strategy_center_thread.join()
        # TODO: this thread took a long time to join, so we comment it out.
        # self._server_status_subscriber_thread.join()
        # Release the strategy worker threads; don't wait so a long-running
        # strategy cannot block shutdown.
        self._executor.shutdown(wait=False)
        logger.info("FaultManager stopped.")