#![cfg(test)]

use nacos_cluster::{
    ClusterConfig, ClusterManager, DataSyncManager, ConsistencyManager,
    SyncMessage, SyncType, DataOperation, ConsistencyProtocol, ConsistencyLevel,
};
use std::sync::Arc;
use tokio::sync::mpsc;
use tokio::time::{sleep, Duration};

/// 测试数据同步基础功能
#[tokio::test]
async fn test_basic_data_sync() {
    let config = ClusterConfig {
        cluster_name: "sync-test".to_string(),
        current_node_id: "node1".to_string(),
        ..Default::default()
    };

    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    // 测试数据存储
    let test_data = b"hello world".to_vec();
    data_sync_manager.store_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "test-key".to_string(),
        test_data.clone(),
        "node1".to_string(),
    ).await.unwrap();

    // 验证数据检索
    let retrieved = data_sync_manager.get_data("public", "DEFAULT_GROUP", "test-key").await.unwrap();
    assert!(retrieved.is_some());
    assert_eq!(retrieved.unwrap().0, test_data);
}

/// 测试数据版本控制
#[tokio::test]
async fn test_data_versioning() {
    let config = ClusterConfig::default();
    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    // 存储初始版本
    data_sync_manager.store_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "version-test".to_string(),
        b"v1".to_vec(),
        "node1".to_string(),
    ).await.unwrap();

    let version1 = data_sync_manager.get_data("public", "DEFAULT_GROUP", "version-test").await.unwrap();
    assert_eq!(version1.unwrap().1.version, 1);

    // 更新数据
    data_sync_manager.store_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "version-test".to_string(),
        b"v2".to_vec(),
        "node1".to_string(),
    ).await.unwrap();

    let version2 = data_sync_manager.get_data("public", "DEFAULT_GROUP", "version-test").await.unwrap();
    assert_eq!(version2.unwrap().1.version, 2);
}

/// 测试一致性管理器
#[tokio::test]
async fn test_consistency_manager() {
    let config = ClusterConfig {
        cluster_name: "consistency-test".to_string(),
        current_node_id: "node1".to_string(),
        ..Default::default()
    };

    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));
    let consistency_manager = ConsistencyManager::new(
        data_sync_manager.clone(),
        cluster_manager.clone(),
        ConsistencyProtocol::Eventual,
        30,
    );

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    let sync_message = SyncMessage::new(
        "node1".to_string(),
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "consistency-test".to_string(),
        SyncType::Incremental,
        DataOperation::Update,
    ).with_data(b"consistency data".to_vec());

    let proposal_id = consistency_manager.propose_change(
        sync_message,
        ConsistencyLevel::Eventual,
    ).await.unwrap();

    assert!(!proposal_id.is_empty());

    // 验证提案状态
    let status = consistency_manager.get_proposal_status(&proposal_id
    ).await.unwrap();
    assert!(matches!(status, nacos_cluster::ProposalStatus::Committed));
}

/// 测试多节点数据同步场景
#[tokio::test]
async fn test_multi_node_data_sync() {
    let (tx, _rx) = mpsc::unbounded_channel();
    
    let configs = vec![
        ClusterConfig {
            cluster_name: "multi-sync".to_string(),
            current_node_id: "node1".to_string(),
            ..Default::default()
        },
        ClusterConfig {
            cluster_name: "multi-sync".to_string(),
            current_node_id: "node2".to_string(),
            ..Default::default()
        },
        ClusterConfig {
            cluster_name: "multi-sync".to_string(),
            current_node_id: "node3".to_string(),
            ..Default::default()
        },
    ];

    let mut managers = vec![];
    for config in configs {
        let manager = Arc::new(ClusterManager::new(config));
        manager.set_event_sender(tx.clone());
        managers.push(manager);
    }

    // 初始化集群节点
    let nodes = vec![
        nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 3),
        nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8849, 2),
        nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8850, 1),
    ];

    for (i, manager) in managers.iter().enumerate() {
        manager.initialize(nodes[i].clone()).await.unwrap();
    }

    // 将所有节点添加到第一个管理器
    for node in &nodes[1..] {
        managers[0].add_node(node.clone()).await.unwrap();
    }

    // 创建数据同步管理器
    let data_sync_manager = Arc::new(DataSyncManager::new(
        managers[0].clone(),
        3,
        100,
    ));

    // 测试数据分发
    data_sync_manager.store_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "distributed-key".to_string(),
        b"distributed data".to_vec(),
        "node1".to_string(),
    ).await.unwrap();

    // 验证集群大小
    assert_eq!(managers[0].get_cluster_size(), 3);
}

/// 测试增量数据同步
#[tokio::test]
async fn test_incremental_sync() {
    let config = ClusterConfig::default();
    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    // 存储多条数据
    for i in 0..10 {
        data_sync_manager.store_data(
            "public".to_string(),
            format!("group-{}", i),
            format!("key-{}", i),
            format!("data-{}", i).into_bytes(),
            "node1".to_string(),
        ).await.unwrap();
    }

    // 验证同步统计
    let stats = data_sync_manager.get_sync_stats().await;
    assert!(stats.total_records >= 10);
}

/// 测试数据冲突解决
#[tokio::test]
async fn test_conflict_resolution() {
    let config = ClusterConfig::default();
    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    // 存储初始数据
    data_sync_manager.store_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "conflict-key".to_string(),
        b"initial".to_vec(),
        "node1".to_string(),
    ).await.unwrap();

    // 模拟冲突 - 旧版本数据
    let conflict_message = SyncMessage::new(
        "node2".to_string(),
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "conflict-key".to_string(),
        SyncType::Incremental,
        DataOperation::Update,
    )
    .with_data(b"old data".to_vec());

    // 手动设置旧版本
    let mut message = conflict_message;
    message.version = 0; // 旧版本

    let result = data_sync_manager.process_sync_message(message).await;
    assert!(result.is_ok());

    // 验证数据未被旧版本覆盖
    let data = data_sync_manager.get_data("public", "DEFAULT_GROUP", "conflict-key").await.unwrap();
    assert_eq!(data.unwrap().0, b"initial");
}

/// 测试全量数据同步
#[tokio::test]
async fn test_full_sync() {
    let config = ClusterConfig::default();
    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    // 存储多组数据
    for ns in [-"public", "private", "system"] {
        for grp in [-"DEFAULT_GROUP", "SYSTEM_GROUP"] {
            for key in [-"config1", "config2", "config3"] {
                data_sync_manager.store_data(
                    ns.to_string(),
                    grp.to_string(),
                    key.to_string(),
                    format!("{}-{}-{}", ns, grp, key).into_bytes(),
                    "node1".to_string(),
                ).await.unwrap();
            }
        }
    }

    // 执行全量同步
    let result = data_sync_manager.perform_full_sync(Some("target-node".to_string())).await;
    assert!(result.is_ok());

    // 验证同步统计
    let stats = data_sync_manager.get_sync_stats().await;
    assert!(stats.total_records >= 18); // 3*2*3 = 18条数据
}

/// 测试数据删除同步
#[tokio::test]
async fn test_data_deletion_sync() {
    let config = ClusterConfig::default();
    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    // 存储数据
    data_sync_manager.store_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "to-delete".to_string(),
        b"will be deleted".to_vec(),
        "node1".to_string(),
    ).await.unwrap();

    // 验证数据存在
    let data = data_sync_manager.get_data("public", "DEFAULT_GROUP", "to-delete").await.unwrap();
    assert!(data.is_some());

    // 删除数据
    data_sync_manager.delete_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "to-delete".to_string(),
        "node1".to_string(),
    ).await.unwrap();

    // 验证数据已删除
    let data = data_sync_manager.get_data("public", "DEFAULT_GROUP", "to-delete").await.unwrap();
    assert!(data.is_none());
}

/// 测试同步统计和清理
#[tokio::test]
async fn test_sync_stats_and_cleanup() {
    let config = ClusterConfig::default();
    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
    cluster_manager.initialize(node.clone()).await.unwrap();

    // 存储测试数据
    data_sync_manager.store_data(
        "public".to_string(),
        "DEFAULT_GROUP".to_string(),
        "stats-test".to_string(),
        b"stats data".to_vec(),
        "node1".to_string(),
    ).await.unwrap();

    // 获取统计信息
    let stats = data_sync_manager.get_sync_stats().await;
    assert!(stats.total_records >= 1);

    // 测试清理（设置较短的过期时间）
    let result = data_sync_manager.cleanup_expired_records(0).await;
    assert!(result.is_ok());
}

/// 测试不同一致性级别
#[tokio::test]
async fn test_different_consistency_levels() {
    let config = ClusterConfig::default();
    let cluster_manager = Arc::new(ClusterManager::new(config));
    let data_sync_manager = Arc::new(DataSyncManager::new(
        cluster_manager.clone(),
        3,
        100,
    ));

    let protocols = [
        ConsistencyProtocol::Strong,
        ConsistencyProtocol::Eventual,
        ConsistencyProtocol::Hybrid,
    ];

    for protocol in protocols {
        let consistency_manager = ConsistencyManager::new(
            data_sync_manager.clone(),
            cluster_manager.clone(),
            protocol,
            30,
        );

        let node = nacos_cluster::ClusterNodeInfo::new("127.0.0.1", 8848, 1);
        cluster_manager.initialize(node.clone()).await.unwrap();

        let sync_message = SyncMessage::new(
            "node1".to_string(),
            "public".to_string(),
            "DEFAULT_GROUP".to_string(),
            format!("level-test-{:?}", protocol),
            SyncType::Incremental,
            DataOperation::Update,
        ).with_data(format!("test-data-{:?}", protocol).into_bytes());

        let proposal_id = consistency_manager.propose_change(
            sync_message,
            ConsistencyLevel::Eventual,
        ).await.unwrap();

        assert!(!proposal_id.is_empty());

        let stats = consistency_manager.get_consistency_stats().await;
        assert!(stats.active_proposals >= 0);
    }
}