#![cfg(test)]

use nacos_cluster::{ClusterConfig, ClusterManager, ClusterNodeInfo, NodeRole, NodeState};
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::task;
use tokio::time::{sleep, Duration};

/// Simulates a multi-port cluster: five managers each bound to a distinct
/// port, with node discovery funneled through the first manager. Verifies
/// cluster size, weighted leader election, and per-port failure handling.
#[tokio::test]
async fn test_multi_port_cluster_simulation() {
    let ports = vec![8848, 8849, 8850, 8851, 8852];
    let mut managers = vec![];
    let (tx, _rx) = mpsc::unbounded_channel();

    // Create one cluster manager per port.
    for (i, port) in ports.iter().enumerate() {
        let config = ClusterConfig {
            cluster_name: "multi-port-cluster".to_string(),
            current_node_id: format!("node{}", i + 1),
            heartbeat_timeout: 2,
            ..Default::default()
        };

        let manager = Arc::new(ClusterManager::new(config));
        manager.set_event_sender(tx.clone());

        // Each node binds to its own port with weight i + 1 (so node5 has weight 5).
        let node = ClusterNodeInfo::new("127.0.0.1", *port, (i + 1) as u32);
        manager.initialize(node.clone()).await.unwrap();

        managers.push((manager, *port, format!("node{}", i + 1)));
    }

    // Register every other node with the first manager (simulated discovery).
    // Fix: the weight must match the one each node was initialized with
    // (index + 1) — the original hard-coded weight 1 made the
    // `leader.weight == 5` assertion below impossible to satisfy.
    let primary_manager = &managers[0].0;
    for (idx, (_, port, _node_id)) in managers.iter().enumerate().skip(1) {
        let node = ClusterNodeInfo::new("127.0.0.1", *port, (idx + 1) as u32);
        primary_manager.add_node(node).await.unwrap();
    }

    // All five nodes should now be tracked by the primary manager.
    assert_eq!(primary_manager.get_cluster_size(), 5);

    // Promote the highest-weight node to leader.
    primary_manager.set_leader("node5").await.unwrap();

    let leader = primary_manager.get_leader().unwrap();
    assert_eq!(leader.node_id, "node5");
    assert_eq!(leader.weight, 5);

    // Simulate a failure on the node listening on port 8850.
    let node_to_fail = ClusterNodeInfo::new("127.0.0.1", 8850, 3);
    primary_manager
        .update_node_state(&node_to_fail.node_id, NodeState::Failed)
        .await
        .unwrap();

    assert_eq!(primary_manager.get_healthy_nodes().len(), 4);
}

/// Verifies that multiple nodes can be initialized concurrently (one spawned
/// task per port), then assembled into a single cluster with an elected leader.
#[tokio::test]
async fn test_async_port_startup() {
    let ports = vec![8860, 8861, 8862, 8863, 8864];
    let mut handles = vec![];
    let (tx, _rx) = mpsc::unbounded_channel();

    // Start every node asynchronously in its own task.
    for (i, port) in ports.iter().enumerate() {
        let tx_clone = tx.clone();
        let node_id = format!("async-node{}", i + 1);
        let port = *port;

        let handle = task::spawn(async move {
            let config = ClusterConfig {
                cluster_name: "async-startup".to_string(),
                current_node_id: node_id.clone(),
                heartbeat_timeout: 3,
                ..Default::default()
            };

            let manager = Arc::new(ClusterManager::new(config));
            manager.set_event_sender(tx_clone);

            let node = ClusterNodeInfo::new("127.0.0.1", port, 1);
            manager.initialize(node.clone()).await.unwrap();

            (manager, port, node_id)
        });

        handles.push(handle);
    }

    // Wait for every node to finish starting up.
    let results = futures::future::join_all(handles).await;
    let managers: Vec<Arc<ClusterManager>> = results
        .into_iter()
        .map(|r| r.unwrap().0)
        .collect();

    // Use the first manager as the hub node.
    let primary = &managers[0];

    // Register the remaining nodes with the hub; weight is index + 1.
    // (Iterate ports directly — the per-node managers are not needed here.)
    for (i, port) in ports.iter().enumerate().skip(1) {
        let node = ClusterNodeInfo::new("127.0.0.1", *port, (i + 1) as u32);
        primary.add_node(node).await.unwrap();
    }

    // Verify the assembled cluster.
    assert_eq!(primary.get_cluster_size(), 5);

    // Elect a leader.
    primary.set_leader("async-node3").await.unwrap();

    let leader = primary.get_leader().unwrap();
    assert_eq!(leader.node_id, "async-node3");
}

/// Verifies that two nodes sharing the same port but carrying different IDs
/// can coexist in one cluster (port is not a uniqueness key).
#[tokio::test]
async fn test_port_conflict_handling() {
    let (tx, _rx) = mpsc::unbounded_channel();

    let config = ClusterConfig {
        cluster_name: "conflict-test".to_string(),
        ..Default::default()
    };

    let manager = Arc::new(ClusterManager::new(config));
    manager.set_event_sender(tx);

    // Two nodes on the same port, distinguished only by their IDs.
    let node1 = ClusterNodeInfo::new("127.0.0.1", 8870, 1);
    let node2 = ClusterNodeInfo::new("127.0.0.1", 8870, 2); // same port, different ID

    manager.initialize(node1.clone()).await.unwrap();

    // Adding a node that reuses the port must succeed.
    manager.add_node(node2.clone()).await.unwrap();

    assert_eq!(manager.get_cluster_size(), 2);

    // Both registered nodes must report the shared port.
    // Fix: the original asserted `contains(&870)` — a typo for 8870 that
    // made the check meaningless.
    let all_nodes = manager.get_all_nodes();
    let ports: Vec<u16> = all_nodes.iter().map(|n| n.port).collect();
    assert!(ports.contains(&8870));
}

/// Verifies dynamic port assignment: nodes take consecutive ports from a
/// base, are added concurrently (with simulated network latency), and every
/// expected port ends up registered.
#[tokio::test]
async fn test_dynamic_port_allocation() {
    let (tx, _rx) = mpsc::unbounded_channel();

    let config = ClusterConfig {
        cluster_name: "dynamic-ports".to_string(),
        ..Default::default()
    };

    let manager = Arc::new(ClusterManager::new(config));
    manager.set_event_sender(tx);

    // Assign consecutive ports from a base value.
    // (No `mut` needed — the base is never reassigned.)
    let base_port = 8880;
    let mut nodes = vec![];

    for i in 0..5 {
        let node = ClusterNodeInfo::new("127.0.0.1", base_port + i, (i + 1) as u32);
        nodes.push(node);
    }

    manager.initialize(nodes[0].clone()).await.unwrap();

    // Add the remaining nodes from concurrent tasks.
    let mut handles = vec![];
    for node in &nodes[1..] {
        let manager_clone = manager.clone();
        let node_clone = node.clone();

        let handle = task::spawn(async move {
            sleep(Duration::from_millis(50)).await; // simulate network latency
            manager_clone.add_node(node_clone).await
        });

        handles.push(handle);
    }

    // Wait for every add operation to complete.
    for handle in handles {
        handle.await.unwrap().unwrap();
    }

    // Every port in the allocated range must be present.
    let all_nodes = manager.get_all_nodes();
    let expected_ports: Vec<u16> = (8880..8885).collect();
    let actual_ports: Vec<u16> = all_nodes.iter().map(|n| n.port).collect();

    for port in expected_ports {
        assert!(actual_ports.contains(&port));
    }
}

/// Verifies heartbeat-based health monitoring across ports: after the
/// heartbeat timeout elapses the cluster still tracks all nodes, and fresh
/// heartbeats restore every node to healthy.
#[tokio::test]
async fn test_port_health_monitoring() {
    let (tx, _rx) = mpsc::unbounded_channel();

    let config = ClusterConfig {
        cluster_name: "health-monitoring".to_string(),
        heartbeat_timeout: 1,
        ..Default::default()
    };

    let manager = Arc::new(ClusterManager::new(config));
    manager.set_event_sender(tx);

    // Create nodes listening on distinct ports (uniform weight 1).
    // (The index was unused — iterate the ports directly.)
    let ports = vec![8890, 8891, 8892, 8893, 8894];
    let mut nodes = vec![];

    for port in &ports {
        let node = ClusterNodeInfo::new("127.0.0.1", *port, 1);
        nodes.push(node);
    }

    manager.initialize(nodes[0].clone()).await.unwrap();
    for node in &nodes[1..] {
        manager.add_node(node.clone()).await.unwrap();
    }

    // Start the heartbeat-based health monitor.
    manager.start_heartbeat_check().await;

    // Let the 1-second heartbeat timeout expire for all nodes.
    sleep(Duration::from_secs(2)).await;

    // All nodes remain tracked even if some are now considered unhealthy.
    let stats = manager.get_cluster_stats();
    assert_eq!(stats.total_nodes, 5);

    // Send a fresh heartbeat for every node to restore health.
    for node in &nodes {
        manager.update_node_heartbeat(&node.node_id).await.unwrap();
    }

    let stats = manager.get_cluster_stats();
    assert_eq!(stats.healthy_nodes, 5);
}

/// Verifies port-level failure injection and recovery: failing two specific
/// nodes marks exactly their ports as failed, and restoring them returns the
/// cluster to five healthy nodes.
#[tokio::test]
async fn test_port_level_failure_injection() {
    // Fix: `rx` was bound `mut` but never used — use `_rx` like the
    // other tests in this file to silence the warning.
    let (tx, _rx) = mpsc::unbounded_channel();

    let config = ClusterConfig {
        cluster_name: "failure-injection".to_string(),
        heartbeat_timeout: 1,
        ..Default::default()
    };

    let manager = Arc::new(ClusterManager::new(config));
    manager.set_event_sender(tx);

    // Create nodes on distinct ports with weight index + 1.
    let ports = vec![8900, 8901, 8902, 8903, 8904];
    let mut nodes = vec![];

    for (i, port) in ports.iter().enumerate() {
        let node = ClusterNodeInfo::new("127.0.0.1", *port, (i + 1) as u32);
        nodes.push(node);
    }

    manager.initialize(nodes[0].clone()).await.unwrap();
    for node in &nodes[1..] {
        manager.add_node(node.clone()).await.unwrap();
    }

    // Inject failures on two ports.
    manager.update_node_state(&nodes[2].node_id, NodeState::Failed).await.unwrap(); // 8902
    manager.update_node_state(&nodes[4].node_id, NodeState::Failed).await.unwrap(); // 8904

    // Exactly the two injected ports must report Failed.
    let failed_ports: Vec<u16> = manager
        .get_all_nodes()
        .into_iter()
        .filter(|n| n.state == NodeState::Failed)
        .map(|n| n.port)
        .collect();

    assert!(failed_ports.contains(&8902));
    assert!(failed_ports.contains(&8904));
    assert_eq!(failed_ports.len(), 2);

    // Recover both failed ports.
    manager.update_node_state(&nodes[2].node_id, NodeState::Running).await.unwrap();
    manager.update_node_state(&nodes[4].node_id, NodeState::Running).await.unwrap();

    let healthy_ports: Vec<u16> = manager
        .get_healthy_nodes()
        .into_iter()
        .map(|n| n.port)
        .collect();

    assert_eq!(healthy_ports.len(), 5);
    assert!(healthy_ports.contains(&8902));
    assert!(healthy_ports.contains(&8904));
}