import uuid
import pytest
import threading
import sys
from unittest.mock import Mock, patch, MagicMock, call

# Stub out the grpc-generated protobuf modules BEFORE any production
# import can pull in the real ones; the suite must run without grpc.
cluster_fault_pb2 = MagicMock()
mock_pb2_grpc = MagicMock()
cluster_fault_pb2.ClientInfo = Mock
cluster_fault_pb2.FaultMsgSignal = Mock
# Register the stubs under the top-level module names so any
# `import cluster_fault_pb2` elsewhere resolves to the mocks.
sys.modules['cluster_fault_pb2'] = cluster_fault_pb2
sys.modules['cluster_fault_pb2_grpc'] = mock_pb2_grpc

# NOTE(review): this rebinds the local `cluster_fault_pb2` name to what the
# package re-exports — presumably the stub registered above; confirm the
# package does not ship its own generated module that shadows the mock.
from motor.controller.ft.cluster_grpc import cluster_fault_pb2
from motor.controller.core.observer import ObserverEvent
from motor.resources.instance import Instance, InsStatus, NodeManagerInfo

# Import FaultManager and related classes after mocking
from motor.controller.ft.fault_manager import (
    FaultManager,
    ServerMetadata, 
    InstanceGroupMetadata,
    InstanceMetadata,
    DeviceFaultInfo,
    Status
)


class FaultManagerTestHelper:
    """Helper class for common FaultManager test setup.

    NOTE(review): all patches below are entered via ``with`` inside the
    factory, so they are reverted the moment the factory returns — only
    the mocks captured by the FaultManager during construction (threads,
    grpc client, strategy map, executor) survive the call.
    """
    
    @staticmethod
    def create_mock_fault_manager():
        """Create a mock fault manager with all necessary mocks.

        Returns:
            tuple: ``(manager, mock_executor, mock_strategy_map)`` — a
            FaultManager whose worker threads, cluster client, strategy
            map and thread pool are all replaced by mocks.
        """
        with patch.object(FaultManager, '_instances', {}):
            with patch('threading.Thread') as mock_thread_class:
                with patch('motor.controller.ft.fault_manager.ClusterNodeClient') as mock_client_class:
                    with patch('motor.controller.ft.fault_manager.generate_strategy_map') as mock_strategy_map:
                        with patch('concurrent.futures.ThreadPoolExecutor') as mock_executor_class:
                            with patch.object(FaultManager, '_server_status_subscriber') as mock_server_subscriber:
                                with patch.object(FaultManager, '_ft_strategy_center') as mock_strategy_center:
                                    # Mock threads - build two distinct mock thread objects
                                    mock_server_thread = MagicMock()
                                    mock_server_thread.daemon = True
                                    mock_server_thread.is_alive.return_value = True
                                    mock_server_thread.start = Mock()  # mock start() so no real thread launches
                                    mock_server_thread.join = Mock()    # mock join()
                                    
                                    mock_strategy_thread = MagicMock()
                                    mock_strategy_thread.daemon = True
                                    mock_strategy_thread.is_alive.return_value = True
                                    mock_strategy_thread.start = Mock()  # mock start() so no real thread launches
                                    mock_strategy_thread.join = Mock()    # mock join()
                                    
                                    # Make the patched Thread class hand back a different
                                    # mock thread depending on the target it is given.
                                    def thread_factory(target=None, daemon=None, name=None):
                                        if target == mock_server_subscriber:
                                            return mock_server_thread
                                        elif target == mock_strategy_center:
                                            return mock_strategy_thread
                                        else:
                                            # Fallback: a generic mock thread for any other target
                                            mock_thread = MagicMock()
                                            mock_thread.daemon = daemon
                                            mock_thread.is_alive.return_value = True
                                            mock_thread.start = Mock()
                                            mock_thread.join = Mock()
                                            return mock_thread
                                    
                                    mock_thread_class.side_effect = thread_factory
                                    
                                    # Mock cluster client
                                    mock_client = Mock()
                                    mock_client_class.return_value = mock_client
                                    mock_client.register = Mock(return_value=True)
                                    mock_client.subscribe_fault_messages = Mock()
                                    mock_client.close = Mock()
                                    
                                    # Mock strategy map
                                    def create_mock_strategy_class():
                                        mock_strategy = Mock()
                                        # make sure the strategy will not finish immediately.
                                        mock_strategy.is_finished.return_value = False
                                        return mock_strategy
                                    
                                    # NOTE(review): for L2..L6 the mapped Mock returns the
                                    # factory *function* itself, not a strategy instance —
                                    # confirm the production code invokes the result again.
                                    mock_strategy_map.return_value = {
                                        "L0": Mock(return_value=None),  # Healthy instances
                                        "L1": Mock(return_value=None),
                                        "L2": Mock(return_value=create_mock_strategy_class),
                                        "L3": Mock(return_value=create_mock_strategy_class),
                                        "L4": Mock(return_value=create_mock_strategy_class),
                                        "L5": Mock(return_value=create_mock_strategy_class),
                                        "L6": Mock(return_value=create_mock_strategy_class),
                                    }
                                    
                                    # Mock executor
                                    mock_executor = Mock()
                                    mock_executor_class.return_value = mock_executor
                                    
                                    # Mock _server_status_subscriber to prevent actual grpc calls
                                    mock_server_subscriber.return_value = None
                                    
                                    # Mock _ft_strategy_center to prevent actual strategy execution
                                    mock_strategy_center.return_value = None
                                    
                                    manager = FaultManager()
                                    
                                    # Verify the threads were correctly mocked
                                    assert manager._server_status_subscriber_thread == mock_server_thread
                                    assert manager._ft_strategy_center_thread == mock_strategy_thread
                                    
                                    return manager, mock_executor, mock_strategy_map
    
    @staticmethod
    def create_fault_manager_with_instances():
        """Create fault manager with test instances and servers.

        Builds on :meth:`create_mock_fault_manager` and pre-populates two
        instances (ids 1 and 2) plus their backing server metadata.
        """
        manager, mock_executor, mock_strategy_map = FaultManagerTestHelper.create_mock_fault_manager()
        
        # Create test instances
        manager._instances[1] = InstanceMetadata(
            instance_id=1,
            node_managers=[NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080")]
        )
        manager._instances[2] = InstanceMetadata(
            instance_id=2,
            node_managers=[NodeManagerInfo(pod_ip="192.168.1.2", host_ip="192.168.1.2", port="8080")]
        )
        
        # Create test servers
        manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
        )
        manager._servers["192.168.1.2"] = ServerMetadata(
            pod_ip="192.168.1.2",
            host_ip="192.168.1.2",
        )
        
        return manager, mock_executor, mock_strategy_map


class TestFaultManager:
    """Test cases for FaultManager class"""

    @pytest.fixture
    def fault_manager(self):
        """Yield a fully mocked FaultManager, stopping it during teardown."""
        manager = FaultManagerTestHelper.create_mock_fault_manager()[0]
        yield manager
        # Teardown: stop the manager unless a test already stopped it.
        if not manager._stop_event.is_set():
            manager.stop()

    @pytest.fixture
    def mock_instance(self):
        """Build a Mock standing in for a prefill Instance on one node."""
        node = NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080")
        instance = Mock(spec=Instance)
        instance.configure_mock(
            id=100,
            job_name="test_job",
            group_id=1,
            role="prefill",
        )
        instance.update_instance_status = Mock()
        instance.get_node_managers = Mock(return_value=[node])
        instance.get_endpoints = Mock(return_value={})
        return instance

    @pytest.fixture
    def mock_instance_manager(self, mock_instance):
        """Patch InstanceManager inside fault_manager and yield its mocked singleton."""
        with patch('motor.controller.ft.fault_manager.InstanceManager') as patched_cls:
            mocked = Mock()
            mocked.get_instance_by_podip = Mock(return_value=mock_instance)
            mocked.get_instance = Mock(return_value=mock_instance)
            mocked.notify = Mock()
            patched_cls.return_value = mocked
            yield mocked

    def setup_method(self):
        """Reset the FaultManager singleton before each test.

        Stops any manager left over from a previous test and drops it
        from the singleton registry so every test builds a fresh one.
        """
        if hasattr(FaultManager, '_instances') and FaultManager in FaultManager._instances:
            try:
                FaultManager._instances[FaultManager].stop()
            # Narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; best-effort cleanup only.
            except Exception:
                pass
            # pop() replaces the redundant re-check + del and tolerates
            # the entry having been removed by stop() itself.
            FaultManager._instances.pop(FaultManager, None)
    
    def teardown_method(self):
        """Stop any FaultManager singleton still running after a test."""
        if hasattr(FaultManager, '_instances') and FaultManager in FaultManager._instances:
            try:
                FaultManager._instances[FaultManager].stop()
            # Narrowed from a bare `except:` which also swallowed
            # SystemExit/KeyboardInterrupt; best-effort cleanup only.
            except Exception:
                pass

    def test_initialization(self, fault_manager):
        """A fresh FaultManager has empty registries and mocked daemon threads."""
        # Core synchronization primitives.
        assert isinstance(fault_manager._stop_event, threading.Event)
        assert isinstance(fault_manager._lock, type(threading.Lock()))

        # All bookkeeping registries start as dicts.
        for registry in (fault_manager._servers, fault_manager._instances, fault_manager._groups):
            assert isinstance(registry, dict)

        # Both worker threads report alive and run as daemons.
        for worker in (fault_manager._server_status_subscriber_thread,
                       fault_manager._ft_strategy_center_thread):
            assert worker.is_alive()
            assert worker.daemon is True

    def test_device_fault_info_creation(self):
        """DeviceFaultInfo must keep every constructor argument verbatim."""
        expected = dict(
            device_type="npu",
            rank_id=0,
            fault_code=0x1234,
            fault_level="L3",
            fault_type="HARDWARE",
            fault_reason="Memory failure",
        )
        fault_info = DeviceFaultInfo(**expected)
        # Each field round-trips unchanged through the dataclass.
        for field_name, want in expected.items():
            assert getattr(fault_info, field_name) == want

    def test_server_metadata_creation(self):
        """ServerMetadata records its ips/status and defaults to zero faults."""
        meta = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
            status=Status.HEALTHY,
        )
        assert (meta.pod_ip, meta.host_ip) == ("192.168.1.1", "192.168.1.1")
        assert meta.status == Status.HEALTHY
        # No device faults by default.
        assert meta.device_fault_infos == []

    def test_instance_metadata_creation(self):
        """InstanceMetadata stores its fields and defaults to a healthy L0 state."""
        managers = [NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080")]
        meta = InstanceMetadata(instance_id=100, status=Status.HEALTHY, node_managers=managers)

        assert meta.instance_id == 100
        assert meta.status == Status.HEALTHY
        assert meta.node_managers == managers

        # Defaults for a fault-free, strategy-less instance.
        assert meta.fault_level == "L0"
        assert meta.fault_code == 0x0
        assert meta.strategy is None

    def test_instance_group_metadata_creation(self):
        """InstanceGroupMetadata keeps its id plus prefill/decode id lists."""
        group = InstanceGroupMetadata(id=1, p_ids=[100, 101], d_ids=[200, 201])

        assert group.id == 1
        assert group.p_ids == [100, 101]
        assert group.d_ids == [200, 201]

    def test_process_cluster_fault_message_normal_signal(self, fault_manager):
        """A 'normal' signal is ignored: the handler must return early."""
        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "normal"

        # Only requirement: no exception escapes the early return.
        fault_manager._process_cluster_fault_message(msg)

    def test_process_cluster_fault_message_unhealthy_node(self, fault_manager, mock_instance_manager):
        """An 'unhealthy' node fault marks the server and evicts its instance."""
        # Known server starts healthy; instance 1 lives on it.
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1", host_ip="192.168.1.1", status=Status.HEALTHY
        )
        fault_manager._instances[1] = InstanceMetadata(
            instance_id=1,
            node_managers=[NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080")],
        )

        # Fault signal carrying one unhealthy node with one device fault.
        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "fault"
        node = msg.nodeFaultInfo.add()
        node.nodeName = "node_0"
        node.nodeIP = "192.168.1.1"
        node.nodeSN = "SN0:06d"
        node.faultLevel = "unhealthy"
        dev = node.faultDevice.add()
        dev.deviceId = "device_0"
        dev.deviceType = "SERVER"
        dev.faultCodes.append("ERR001")
        dev.faultLevel = "CRITICAL"
        dev.faultType.append("HARDWARE")
        dev.faultReason.append("Memory failure")

        # The instance manager hands back this mock for the faulted pod.
        evicted = Mock()
        evicted.update_instance_status = Mock()
        mock_instance_manager.get_instance_by_podip.return_value = evicted

        with patch.object(fault_manager, '_update_instances_status') as refresh:
            fault_manager._process_cluster_fault_message(msg)
            refresh.assert_called_once()

        # Server flipped to UNHEALTHY with the device fault recorded.
        server = fault_manager._servers["192.168.1.1"]
        assert server.status == Status.UNHEALTHY
        assert len(server.device_fault_infos) == 1

        # Instance was looked up, deactivated and announced as removed.
        mock_instance_manager.get_instance_by_podip.assert_called_once_with("192.168.1.1")
        evicted.update_instance_status.assert_called_once_with(InsStatus.INACTIVE)
        mock_instance_manager.notify.assert_called_once_with(
            evicted, ObserverEvent.INSTANCE_REMOVED
        )

    def test_process_cluster_fault_message_healthy_node(self, fault_manager):
        """A 'healthy' report restores a previously unhealthy server."""
        # Server starts UNHEALTHY; instance 1 lives on it.
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1", host_ip="192.168.1.1", status=Status.UNHEALTHY
        )
        fault_manager._instances[1] = InstanceMetadata(
            instance_id=1,
            node_managers=[NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080")],
        )

        # Fault signal reporting the node healthy again.
        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "fault"
        node = msg.nodeFaultInfo.add()
        node.nodeName = "node_0"
        node.nodeIP = "192.168.1.1"
        node.nodeSN = "SN0:06d"
        node.faultLevel = "healthy"

        with patch.object(fault_manager, '_update_instances_status') as refresh:
            fault_manager._process_cluster_fault_message(msg)
            refresh.assert_called_once()

        # Server recovered to HEALTHY.
        assert fault_manager._servers["192.168.1.1"].status == Status.HEALTHY

    def test_process_cluster_fault_message_unknown_server(self, fault_manager):
        """A fault for an unregistered server is skipped without side effects."""
        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "fault"
        node = msg.nodeFaultInfo.add()
        node.nodeName = "node_0"
        node.nodeIP = "192.168.1.99"  # Not present in fault_manager._servers.
        node.nodeSN = "SN0:06d"
        node.faultLevel = "healthy"

        # Must neither raise nor fabricate server metadata.
        fault_manager._process_cluster_fault_message(msg)
        assert "192.168.1.99" not in fault_manager._servers

    def test_process_cluster_fault_message_none_input(self, fault_manager):
        """Passing None must be a silent no-op."""
        fault_manager._process_cluster_fault_message(None)

    def test_process_cluster_fault_message_missing_attributes(self, fault_manager):
        """A signal whose signalType was never set must not crash the handler."""
        # signalType deliberately left unset on a fresh message.
        fault_manager._process_cluster_fault_message(cluster_fault_pb2.FaultMsgSignal())

    def test_process_cluster_fault_message_missing_nodefaultinfo(self, fault_manager):
        """A fault signal without nodeFaultInfo must be tolerated."""
        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "fault"
        # nodeFaultInfo deliberately left unset.
        fault_manager._process_cluster_fault_message(msg)

    def test_process_cluster_fault_message_large_device_faults(self, fault_manager):
        """Device-fault lists beyond 1000 entries must be truncated to 1000."""
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1", host_ip="192.168.1.1", status=Status.HEALTHY
        )

        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "fault"

        node = cluster_fault_pb2.NodeFaultInfo()
        node.nodeName = "node_0"
        node.nodeIP = "192.168.1.1"
        node.nodeSN = "SN0:06d"
        node.faultLevel = "unhealthy"
        # 1500 device faults: 500 more than the handler should retain.
        for idx in range(1500):
            dev = node.faultDevice.add()
            dev.deviceId = f"device_{idx}"
            dev.deviceType = "SERVER"
            dev.faultCodes.append(f"ERR{idx:03d}")
            dev.faultLevel = "CRITICAL"
            dev.faultType.append("HARDWARE")
            dev.faultReason.append(f"Fault reason {idx}")
        msg.nodeFaultInfo.append(node)

        fault_manager._process_cluster_fault_message(msg)

        # Cap enforced at 1000 recorded faults.
        assert len(fault_manager._servers["192.168.1.1"].device_fault_infos) == 1000

    def test_process_cluster_fault_message_invalid_nodeinfo(self, fault_manager):
        """A node entry missing nodeIP/faultLevel must be handled gracefully."""
        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "fault"
        # Add an entry but leave nodeIP and faultLevel unset.
        msg.nodeFaultInfo.add()

        fault_manager._process_cluster_fault_message(msg)

    def test_process_cluster_fault_message_external_call_failure(self, fault_manager, mock_instance_manager):
        """Server state must still update even when the instance lookup fails."""
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1", host_ip="192.168.1.1", status=Status.HEALTHY
        )

        msg = cluster_fault_pb2.FaultMsgSignal()
        msg.signalType = "fault"
        node = msg.nodeFaultInfo.add()
        node.nodeName = "node_0"
        node.nodeIP = "192.168.1.1"
        node.nodeSN = "SN0:06d"
        node.faultLevel = "unhealthy"

        # Simulate the instance manager blowing up mid-processing.
        mock_instance_manager.get_instance_by_podip.side_effect = Exception("Database connection failed")

        fault_manager._process_cluster_fault_message(msg)

        # The fault was still recorded despite the downstream failure.
        assert fault_manager._servers["192.168.1.1"].status == Status.UNHEALTHY

    def test_eval_server_status_healthy_server(self, fault_manager):
        """Evaluating a healthy, fault-free server yields None."""
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
            status=Status.HEALTHY,
            device_fault_infos=[],
        )
        assert fault_manager._eval_server_status("192.168.1.1") is None

    def test_eval_server_status_unhealthy_server(self, fault_manager):
        """Evaluation reports the most severe fault among the device faults."""
        faults = [
            DeviceFaultInfo(device_type="npu", rank_id=rank, fault_code=code, fault_level=level)
            for rank, code, level in ((0, 0x1000, "L2"), (1, 0x2000, "L4"), (2, 0x3000, "L3"))
        ]
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
            status=Status.UNHEALTHY,
            device_fault_infos=faults,
        )

        summary = fault_manager._eval_server_status("192.168.1.1")
        assert summary is not None
        assert summary.fault_level == "L4"   # worst severity wins
        assert summary.fault_code == 0x2000  # code paired with that severity

    def test_eval_server_status_unknown_server(self, fault_manager):
        """Evaluating an unregistered server raises ValueError."""
        with pytest.raises(ValueError, match="Server 192.168.1.99 not found"):
            fault_manager._eval_server_status("192.168.1.99")

    def test_update_instances_status_healthy_instance(self, fault_manager):
        """Instances on healthy servers keep their default fault state."""
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
            status=Status.HEALTHY,
            device_fault_infos=[],
        )
        meta = InstanceMetadata(
            instance_id=1,
            node_managers=[NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080")],
        )
        fault_manager._instances[1] = meta

        fault_manager._update_instances_status()

        # Still the pristine L0 / 0x0 state.
        assert meta.fault_level == "L0"
        assert meta.fault_code == 0x0

    def test_update_instances_status_unhealthy_instance(self, fault_manager):
        """Faults on the backing server propagate to the instance metadata."""
        faults = [
            DeviceFaultInfo(device_type="npu", rank_id=rank, fault_code=code, fault_level=level)
            for rank, code, level in ((0, 0x1000, "L2"), (1, 0x2000, "L4"), (2, 0x3000, "L3"))
        ]
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
            status=Status.UNHEALTHY,
            device_fault_infos=faults,
        )
        meta = InstanceMetadata(
            instance_id=1,
            node_managers=[NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080")],
        )
        fault_manager._instances[1] = meta

        fault_manager._update_instances_status()

        # The L4 fault (code 0x2000) is the most severe and wins.
        assert meta.fault_level == "L4"
        assert meta.fault_code == 0x2000

    def test_update_instances_status_multiple_servers(self, fault_manager):
        """An instance spanning servers inherits the worst fault among them."""
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
            status=Status.UNHEALTHY,
            device_fault_infos=[
                DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x1000, fault_level="L2")
            ],
        )
        fault_manager._servers["192.168.1.2"] = ServerMetadata(
            pod_ip="192.168.1.2",
            host_ip="192.168.1.2",
            status=Status.UNHEALTHY,
            device_fault_infos=[
                DeviceFaultInfo(device_type="npu", rank_id=1, fault_code=0x3000, fault_level="L5")
            ],
        )
        meta = InstanceMetadata(
            instance_id=1,
            node_managers=[
                NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080"),
                NodeManagerInfo(pod_ip="192.168.1.2", host_ip="192.168.1.2", port="8080"),
            ],
        )
        fault_manager._instances[1] = meta

        fault_manager._update_instances_status()

        # L5/0x3000 from the second server dominates L2/0x1000.
        assert meta.fault_level == "L5"
        assert meta.fault_code == 0x3000

    def test_update_instances_status_mixed_servers(self, fault_manager):
        """Only the unhealthy server contributes to the instance fault state."""
        fault_manager._servers["192.168.1.1"] = ServerMetadata(
            pod_ip="192.168.1.1",
            host_ip="192.168.1.1",
            status=Status.HEALTHY,
            device_fault_infos=[],
        )
        fault_manager._servers["192.168.1.2"] = ServerMetadata(
            pod_ip="192.168.1.2",
            host_ip="192.168.1.2",
            status=Status.UNHEALTHY,
            device_fault_infos=[
                DeviceFaultInfo(device_type="npu", rank_id=1, fault_code=0x2000, fault_level="L3")
            ],
        )
        meta = InstanceMetadata(
            instance_id=1,
            node_managers=[
                NodeManagerInfo(pod_ip="192.168.1.1", host_ip="192.168.1.1", port="8080"),
                NodeManagerInfo(pod_ip="192.168.1.2", host_ip="192.168.1.2", port="8080"),
            ],
        )
        fault_manager._instances[1] = meta

        fault_manager._update_instances_status()

        # The single unhealthy server's fault defines the instance state.
        assert meta.fault_level == "L3"
        assert meta.fault_code == 0x2000

    def test_update_instance_added_prefill_role(self, fault_manager, mock_instance):
        """INSTANCE_ADDED registers instance, server and prefill group entries."""
        fault_manager.update(mock_instance, ObserverEvent.INSTANCE_ADDED)

        # Instance registered with its single node manager.
        meta = fault_manager._instances.get(100)
        assert meta is not None
        assert meta.instance_id == 100
        assert len(meta.node_managers) == 1
        assert meta.node_managers[0].pod_ip == "192.168.1.1"

        # Server metadata created and healthy.
        server = fault_manager._servers.get("192.168.1.1")
        assert server is not None
        assert (server.pod_ip, server.host_ip) == ("192.168.1.1", "192.168.1.1")
        assert server.status == Status.HEALTHY

        # A prefill instance lands in p_ids of its group; d_ids stay empty.
        group = fault_manager._groups.get(1)
        assert group is not None
        assert group.id == 1
        assert 100 in group.p_ids
        assert len(group.d_ids) == 0

    def test_update_instance_added_decode_role(self, fault_manager):
        """INSTANCE_ADDED for a decode instance fills d_ids, not p_ids."""
        decode_instance = Mock(spec=Instance)
        decode_instance.id = 200
        decode_instance.job_name = "test_job_decode"
        decode_instance.group_id = 1
        decode_instance.role = "decode"
        decode_instance.get_node_managers.return_value = [
            NodeManagerInfo(pod_ip="192.168.1.2", host_ip="192.168.1.2", port="8080")
        ]
        decode_instance.get_endpoints.return_value = {}

        fault_manager.update(decode_instance, ObserverEvent.INSTANCE_ADDED)

        # Decode instance recorded under d_ids; p_ids untouched.
        assert 1 in fault_manager._groups
        group = fault_manager._groups[1]
        assert group.id == 1
        assert 200 in group.d_ids
        assert len(group.p_ids) == 0

        # Server metadata created for the decode node.
        assert "192.168.1.2" in fault_manager._servers
        server = fault_manager._servers["192.168.1.2"]
        assert (server.pod_ip, server.host_ip) == ("192.168.1.2", "192.168.1.2")

    def test_update_instance_separated(self, fault_manager, mock_instance):
        """Test update method with INSTANCE_SEPERATED event"""
        # Register the instance, then signal its separation.
        fault_manager.update(mock_instance, ObserverEvent.INSTANCE_ADDED)
        fault_manager.update(mock_instance, ObserverEvent.INSTANCE_SEPERATED)

        # Separation flips the instance to UNHEALTHY without deleting the
        # instance record, its server entry, or its group membership.
        assert 100 in fault_manager._instances
        assert fault_manager._instances[100].status == Status.UNHEALTHY
        assert "192.168.1.1" in fault_manager._servers
        assert 100 in fault_manager._groups[1].p_ids

    def test_update_instance_removed(self, fault_manager, mock_instance):
        """Test update method with INSTANCE_REMOVED event"""
        # Register, then immediately remove the same instance.
        fault_manager.update(mock_instance, ObserverEvent.INSTANCE_ADDED)
        fault_manager.update(mock_instance, ObserverEvent.INSTANCE_REMOVED)

        # Instance, its server record, and the now-empty group are all gone.
        assert 100 not in fault_manager._instances
        assert "192.168.1.1" not in fault_manager._servers
        assert 1 not in fault_manager._groups

    def test_update_instance_removed_empty_group(self, fault_manager, mock_instance):
        """Test update method removes group when all instances are removed"""
        # Add the only instance of group 1, then remove it again.
        for event in (ObserverEvent.INSTANCE_ADDED, ObserverEvent.INSTANCE_REMOVED):
            fault_manager.update(mock_instance, event)

        # Once both p_ids and d_ids are empty the group itself is dropped.
        assert 1 not in fault_manager._groups

    def test_update_invalid_event(self, fault_manager, mock_instance):
        """Test update method with invalid event"""
        # Unknown event values must be rejected loudly, not silently ignored.
        with pytest.raises(ValueError, match="Invalid event"):
            fault_manager.update(mock_instance, "INVALID_EVENT")

    @pytest.fixture
    def fault_manager_with_instances(self):
        """Create fault manager with test instances and servers"""
        manager = FaultManagerTestHelper.create_fault_manager_with_instances()[0]
        yield manager
        # Teardown: stop the manager unless a test already shut it down.
        if not manager._stop_event.is_set():
            manager.stop()

    def test_ft_strategy_center_healthy_instances(self, fault_manager_with_instances):
        """Test _ft_strategy_center with healthy instances - should not start any strategy"""
        fm = fault_manager_with_instances

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # With every server healthy, no strategy should have been scheduled.
        fm._executor.submit.assert_not_called()

        # Each instance keeps its default fault bookkeeping.
        for meta in fm._instances.values():
            assert meta.strategy is None
            assert meta.fault_level == "L0"
            assert meta.fault_code == 0x0

    def test_ft_strategy_center_unhealthy_instances(self, fault_manager_with_instances):
        """Test _ft_strategy_center with unhealthy instances - should start recovery strategy"""
        fm = fault_manager_with_instances

        # Mark one server as faulty with an L3 device fault.
        faulty = fm._servers["192.168.1.1"]
        faulty.status = Status.UNHEALTHY
        faulty.device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x1234, fault_level="L3")
        ]

        # Propagate the server fault onto the affected instances.
        fm._update_instances_status()

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # Exactly one submission, positional args (execute_method, instance_id).
        fm._executor.submit.assert_called_once()
        execute_method, instance_id = fm._executor.submit.call_args[0][:2]
        assert execute_method is not None
        assert instance_id == 1

        # The instance metadata now records the strategy and fault details.
        meta = fm._instances[1]
        assert meta.strategy is not None
        assert meta.fault_level == "L3"
        assert meta.fault_code == 0x1234

    def test_ft_strategy_center_strategy_levels(self, fault_manager_with_instances):
        """Test different error levels trigger appropriate strategies"""
        fm = fault_manager_with_instances

        # Inject an L2-level device fault on the first server.
        faulty = fm._servers["192.168.1.1"]
        faulty.status = Status.UNHEALTHY
        faulty.device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x00f1fef5, fault_level="L2")
        ]

        # Propagate the server fault onto the affected instances.
        fm._update_instances_status()

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # The L2 strategy factory must be invoked with (fault_code, instance_id).
        fm._strategies["L2"].assert_called_once_with(0x00f1fef5, 1)

    def test_ft_strategy_center_strategy_finished(self, fault_manager_with_instances):
        """Test that finished strategies are cleaned up"""
        fm = fault_manager_with_instances

        # Pretend instance 1 already ran a strategy to completion.
        finished = Mock()
        finished.is_finished.return_value = True
        fm._instances[1].strategy = finished

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # A finished strategy is detached from the instance metadata.
        assert fm._instances[1].strategy is None

    def test_ft_strategy_center_multiple_instances(self, fault_manager_with_instances):
        """Test handling multiple instances with different health statuses"""
        fm = fault_manager_with_instances

        # Only the first server/instance becomes unhealthy (L3 fault).
        faulty = fm._servers["192.168.1.1"]
        faulty.status = Status.UNHEALTHY
        faulty.device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x3000, fault_level="L3")
        ]

        # Propagate the server fault onto the affected instances.
        fm._update_instances_status()

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # Only the unhealthy instance triggers a strategy submission.
        assert fm._executor.submit.call_count == 1

        # Instance 1 carries the fault; instance 2 stays at defaults.
        assert fm._instances[1].strategy is not None
        assert fm._instances[1].fault_level == "L3"
        assert fm._instances[2].strategy is None
        assert fm._instances[2].fault_level == "L0"  # healthy instance keeps L0

    def test_ft_strategy_center_fault_upgrade(self, fault_manager_with_instances):
        """Test fault upgrade scenario - stop current strategy and start new higher level strategy"""
        fm = fault_manager_with_instances

        # Instance 1 starts with a running L2 strategy for fault 0x2000.
        running_l2 = Mock()
        running_l2.is_finished.return_value = False
        running_l2.stop = Mock()
        fm._instances[1].strategy = running_l2
        fm._instances[1].fault_level = "L2"
        fm._instances[1].fault_code = 0x2000

        # The server then reports a more severe L4 fault (upgrade L2 -> L4).
        faulty = fm._servers["192.168.1.1"]
        faulty.status = Status.UNHEALTHY
        faulty.device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x4000, fault_level="L4")
        ]

        # Propagate the server fault onto the affected instances.
        fm._update_instances_status()

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # The stale L2 strategy is stopped before the upgrade.
        running_l2.stop.assert_called_once()

        # Exactly one new submission, positional args (execute_method, instance_id).
        assert fm._executor.submit.call_count == 1
        execute_method, instance_id = fm._executor.submit.call_args[0][:2]
        assert execute_method is not None
        assert instance_id == 1

        # Metadata reflects the upgraded L4 fault and a fresh strategy.
        meta = fm._instances[1]
        assert meta.strategy is not None
        assert meta.fault_level == "L4"
        assert meta.fault_code == 0x4000

        # The L4 factory received (fault_code, instance_id).
        fm._strategies["L4"].assert_called_once_with(0x4000, 1)

    def test_ft_strategy_center_fault_downgrade(self, fault_manager_with_instances):
        """Test fault downgrade scenario - should stop current strategy and start new lower level strategy"""
        fm = fault_manager_with_instances

        # Instance 1 starts with a running L4 strategy for fault 0x4000.
        running_l4 = Mock()
        running_l4.is_finished.return_value = False
        running_l4.stop = Mock()
        fm._instances[1].strategy = running_l4
        fm._instances[1].fault_level = "L4"
        fm._instances[1].fault_code = 0x4000

        # The server then reports a milder L2 fault (downgrade L4 -> L2).
        faulty = fm._servers["192.168.1.1"]
        faulty.status = Status.UNHEALTHY
        faulty.device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x2000, fault_level="L2")
        ]

        # Propagate the server fault onto the affected instances.
        fm._update_instances_status()

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # Any change of strategy level stops the current one, even a downgrade.
        running_l4.stop.assert_called_once()

        # Exactly one new submission, positional args (execute_method, instance_id).
        assert fm._executor.submit.call_count == 1
        execute_method, instance_id = fm._executor.submit.call_args[0][:2]
        assert execute_method is not None
        assert instance_id == 1

        # Metadata now tracks the downgraded L2 fault and a fresh strategy.
        meta = fm._instances[1]
        assert meta.strategy is not None
        assert meta.fault_level == "L2"
        assert meta.fault_code == 0x2000

        # The L2 factory received (fault_code, instance_id).
        fm._strategies["L2"].assert_called_once_with(0x2000, 1)

    def test_ft_strategy_center_same_level_strategy(self, fault_manager_with_instances):
        """Test same level strategy with same fault code - should not stop current strategy but update fault info"""
        fm = fault_manager_with_instances

        # Instance 1 already runs an L3 strategy for fault 0x3000.
        running_l3 = Mock()
        running_l3.is_finished.return_value = False
        running_l3.stop = Mock()
        fm._instances[1].strategy = running_l3
        fm._instances[1].fault_level = "L3"
        fm._instances[1].fault_code = 0x3000

        # The server re-reports the exact same L3/0x3000 fault.
        faulty = fm._servers["192.168.1.1"]
        faulty.status = Status.UNHEALTHY
        faulty.device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x3000, fault_level="L3")
        ]

        # Propagate the server fault onto the affected instances.
        fm._update_instances_status()

        # Replace the L3 factory: the identical fault code yields no new
        # strategy; any other code falls back to the original mock factory.
        original_factory = fm._strategies["L3"]

        def factory_skipping_duplicate(fault_code, instance_id):
            if fault_code == 0x3000:  # same fault code as the running strategy
                return None  # no new strategy needed
            return original_factory(fault_code, instance_id)

        fm._strategies["L3"] = factory_skipping_duplicate

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # Identical level + code: the current strategy keeps running untouched.
        running_l3.stop.assert_not_called()
        fm._executor.submit.assert_not_called()

        # Metadata keeps the same strategy object and fault details.
        meta = fm._instances[1]
        assert meta.strategy is running_l3
        assert meta.fault_level == "L3"
        assert meta.fault_code == 0x3000

    def test_ft_strategy_center_different_fault_code_same_level(self, fault_manager_with_instances):
        """Test different fault code but same level - should stop current strategy and start new one"""
        fm = fault_manager_with_instances

        # Instance 1 runs an L3 strategy for fault 0x3000.
        running_l3 = Mock()
        running_l3.is_finished.return_value = False
        running_l3.stop = Mock()
        fm._instances[1].strategy = running_l3
        fm._instances[1].fault_level = "L3"
        fm._instances[1].fault_code = 0x3000

        # The server reports the same level but a new fault code (0x3001).
        faulty = fm._servers["192.168.1.1"]
        faulty.status = Status.UNHEALTHY
        faulty.device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x3001, fault_level="L3")
        ]

        # Propagate the server fault onto the affected instances.
        fm._update_instances_status()

        # Abort the polling loop after one pass by making time.sleep raise.
        def _break_loop(_seconds):
            raise StopIteration

        with patch('motor.controller.ft.fault_manager.time') as patched_time:
            patched_time.sleep.side_effect = _break_loop
            try:
                fm._ft_strategy_center()
            except StopIteration:
                pass

        # A different fault code stops the current strategy...
        running_l3.stop.assert_called_once()

        # ...and triggers exactly one new submission (execute_method, instance_id).
        assert fm._executor.submit.call_count == 1
        execute_method, instance_id = fm._executor.submit.call_args[0][:2]
        assert execute_method is not None
        assert instance_id == 1

        # Metadata keeps level L3 but records the new fault code.
        meta = fm._instances[1]
        assert meta.strategy is not None
        assert meta.fault_level == "L3"
        assert meta.fault_code == 0x3001

        # The L3 factory received (fault_code, instance_id).
        fm._strategies["L3"].assert_called_once_with(0x3001, 1)


class TestFaultManagerIntegration:
    """Integration tests for FaultManager covering multi-instance workflows."""

    @staticmethod
    def _make_instance(ins_id, job_name, group_id, role, pod_ip):
        """Build a Mock(spec=Instance) with one node manager at *pod_ip*.

        Centralizes the mock-instance construction that was previously
        copy-pasted into every test of this class.
        """
        instance = Mock(spec=Instance)
        instance.id = ins_id
        instance.job_name = job_name
        instance.group_id = group_id
        instance.role = role
        instance.get_node_managers.return_value = [
            NodeManagerInfo(pod_ip=pod_ip, host_ip=pod_ip, port="8080")
        ]
        instance.get_endpoints.return_value = {}
        return instance

    @pytest.fixture
    def fault_manager(self):
        """Create fault manager with proper mocking"""
        manager, _, _ = FaultManagerTestHelper.create_mock_fault_manager()
        yield manager
        # cleanup: stop background machinery unless a test already did
        if not manager._stop_event.is_set():
            manager.stop()

    def test_multiple_instances_same_group(self, fault_manager):
        """Test adding multiple instances to the same group"""
        # Add a prefill and a decode instance that share group 1.
        mock_prefill = self._make_instance(100, "prefill_job", 1, "prefill", "192.168.1.1")
        fault_manager.update(mock_prefill, ObserverEvent.INSTANCE_ADDED)

        mock_decode = self._make_instance(200, "decode_job", 1, "decode", "192.168.1.2")
        fault_manager.update(mock_decode, ObserverEvent.INSTANCE_ADDED)

        # Check group contains both instances, each in its role bucket.
        group = fault_manager._groups[1]
        assert 100 in group.p_ids
        assert 200 in group.d_ids
        assert len(group.p_ids) == 1
        assert len(group.d_ids) == 1

    def test_concurrent_updates(self, fault_manager):
        """Test concurrent updates to FaultManager"""
        # Create five instances spread over five groups, alternating roles.
        instances = [
            self._make_instance(
                i * 100,
                f"job_{i}",
                i,
                "prefill" if i % 2 == 0 else "decode",
                f"192.168.1.{i + 1}",
            )
            for i in range(5)
        ]

        # Add all instances
        for instance in instances:
            fault_manager.update(instance, ObserverEvent.INSTANCE_ADDED)

        # Verify all instances are registered
        assert len(fault_manager._instances) == 5
        assert len(fault_manager._groups) == 5

        # Remove all instances
        for instance in instances:
            fault_manager.update(instance, ObserverEvent.INSTANCE_REMOVED)

        # Verify all instances and their groups are removed
        assert len(fault_manager._instances) == 0
        assert len(fault_manager._groups) == 0

    def test_triggered_update_workflow(self, fault_manager):
        """Test the complete triggered update workflow"""
        mock_instance = self._make_instance(100, "test_job", 1, "prefill", "192.168.1.1")

        # Add instance
        fault_manager.update(mock_instance, ObserverEvent.INSTANCE_ADDED)

        # Verify initial state: healthy server, default fault bookkeeping.
        assert 100 in fault_manager._instances
        assert "192.168.1.1" in fault_manager._servers
        assert fault_manager._instances[100].fault_level == "L0"
        assert fault_manager._instances[100].fault_code == 0x0

        # Simulate an L3 device fault on the instance's server.
        fault_manager._servers["192.168.1.1"].status = Status.UNHEALTHY
        fault_manager._servers["192.168.1.1"].device_fault_infos = [
            DeviceFaultInfo(device_type="npu", rank_id=0, fault_code=0x1234, fault_level="L3")
        ]

        # Call _update_instances_status to simulate the triggered update.
        fault_manager._update_instances_status()

        # Verify instance metadata was updated from the server fault.
        assert fault_manager._instances[100].fault_level == "L3"
        assert fault_manager._instances[100].fault_code == 0x1234

        # Run one pass of the strategy loop (time.sleep aborts the loop).
        with patch('motor.controller.ft.fault_manager.time') as mock_time:
            def mock_sleep(seconds):
                raise StopIteration

            mock_time.sleep.side_effect = mock_sleep
            try:
                fault_manager._ft_strategy_center()
            except StopIteration:
                pass

        # Verify one strategy submission: (execute_method, instance_id).
        fault_manager._executor.submit.assert_called_once()
        call_args = fault_manager._executor.submit.call_args
        execute_method = call_args[0][0]  # first positional arg: execute method
        instance_id = call_args[0][1]  # second positional arg: instance id
        assert execute_method is not None
        assert instance_id == 100

        # Verify L3 strategy was called with correct parameters
        fault_manager._strategies["L3"].assert_called_once_with(0x1234, 100)

    def test_fault_message_triggers_instance_update(self, fault_manager):
        """Test that fault message processing triggers instance status update"""
        mock_instance = self._make_instance(200, "test_job_2", 2, "decode", "192.168.1.2")

        # Add instance
        fault_manager.update(mock_instance, ObserverEvent.INSTANCE_ADDED)

        # Verify initial state
        assert fault_manager._instances[200].fault_level == "L0"  # Default level
        assert fault_manager._instances[200].fault_code == 0x0  # Default code

        # Create a fault message describing an unhealthy node.
        fault_msg = cluster_fault_pb2.FaultMsgSignal()
        fault_msg.signalType = "fault"

        node_fault_info = fault_msg.nodeFaultInfo.add()
        node_fault_info.nodeName = "node_1"
        node_fault_info.nodeIP = "192.168.1.2"
        # NOTE(review): "SN1:06d" looks like a leftover format spec
        # (e.g. f"SN{1:06d}") — confirm the intended serial number.
        node_fault_info.nodeSN = "SN1:06d"
        node_fault_info.faultLevel = "unhealthy"

        # Device-level fault detail to attach to the server afterwards.
        device_fault_info = DeviceFaultInfo(
            device_type="SERVER",
            rank_id=-1,
            fault_code=0x2000,
            fault_level="L3",
            fault_type="HARDWARE",
            fault_reason="Disk failure",
        )

        # Process fault message first to update server status
        fault_manager._process_cluster_fault_message(fault_msg)

        # Then set the device fault info in the server metadata
        fault_manager._servers["192.168.1.2"].device_fault_infos = [device_fault_info]
        fault_manager._servers["192.168.1.2"].status = Status.UNHEALTHY

        # Verify server status was updated
        assert fault_manager._servers["192.168.1.2"].status == Status.UNHEALTHY

        # Manually call _update_instances_status to simulate the triggered update
        fault_manager._update_instances_status()

        # Verify instance metadata was updated through _update_instances_status
        assert fault_manager._instances[200].fault_level == "L3"  # From device fault info
        assert fault_manager._instances[200].fault_code == 0x2000  # From device fault info