use std::sync::Arc;
use std::time::Duration;
use anyhow::Result;

use protoactor_rs::remote::{Remote, Config as RemoteConfigBuilder};
use protoactor_rs::cluster::{
    ClusterSystem, ClusterConfig, ClusterEvent,
    PartitionManagerConfig, PartitionState, PartitionId,
};
use protoactor_rs::system::ActorSystem;

#[tokio::main]
async fn main() -> Result<()> {
    // Initialize logging.
    tracing_subscriber::fmt::init();

    // Simulate a two-node cluster: the first CLI argument selects the role.
    // Read the argument once instead of re-iterating `env::args()` per branch,
    // and compare via `as_deref()` to avoid allocating a String for each test.
    match std::env::args().nth(1).as_deref() {
        Some("node1") => run_node("node1", 8090, true).await?,
        Some("node2") => run_node("node2", 8091, false).await?,
        _ => {
            println!("Usage: cargo run --example cluster_partitions -- [node1|node2]");
            println!("       Run two instances with different node IDs to simulate a cluster");
            println!("       node1 will be the master node that initializes the partition manager");
        }
    }

    Ok(())
}

/// Boot one cluster node and keep it running.
///
/// * `node_id`   - unique node name; also used as the cluster name/role.
/// * `port`      - local TCP port the remote transport binds to on 127.0.0.1.
/// * `is_master` - when `true`, this node hosts the partition manager and,
///                 after a startup delay, prints the partition table once.
///
/// Note: under normal operation this function never returns `Ok` — it parks
/// in an infinite sleep loop at the end to keep the process alive.
async fn run_node(node_id: &str, port: u16, is_master: bool) -> Result<()> {
    println!("Starting node: {}", node_id);

    // Create the actor system.
    let system = ActorSystem::new();

    // Configure the remoting layer to listen on the given local port.
    let remote_config = RemoteConfigBuilder::new()
        .with_host(&format!("127.0.0.1:{}", port))
        .build();

    // Create and start the remote transport.
    let mut remote = Remote::new(system.clone(), remote_config);
    remote.start().await?;

    // Cluster membership configuration; the timeouts are in seconds.
    let cluster_config = ClusterConfig {
        name: node_id.to_string(), // the node ID doubles as the cluster role
        host: "127.0.0.1".to_string(),
        port,
        heartbeat_interval: 1,
        unreachable_timeout: 5,
        down_timeout: 10,
    };

    // Partition layout: small counts keep the demo output readable.
    let partition_config = PartitionManagerConfig {
        partition_count: 10,    // 10 partitions for easy demonstration
        replica_count: 1,       // 1 replica per partition to keep things simple
        virtual_node_count: 50, // 50 virtual nodes
    };

    // Build the cluster system with an in-memory membership provider.
    let (mut cluster, mut cluster_events) = ClusterSystem::new_with_memory_provider(
        cluster_config,
        Arc::new(remote)
    );

    // Only the master node hosts the partition manager.
    let cluster = if is_master {
        cluster.with_partition_manager(partition_config)
    } else {
        cluster
    };

    // Start the cluster system.
    cluster.start().await?;

    // Log cluster events in the background for the lifetime of the process.
    tokio::spawn(async move {
        while let Some(event) = cluster_events.recv().await {
            println!("Cluster event: {:?}", event);
        }
    });

    println!("Node {} started", node_id);

    // The master node waits for the peer, then dumps the partition table once.
    if is_master {
        // Give node2 time to start and join the cluster.
        tokio::time::sleep(Duration::from_secs(5)).await;

        println!("Checking partition status...");

        if let Some(pm) = cluster.get_partition_manager() {
            // Print every partition with its owner, state, and replica set.
            let partitions = pm.get_all_partitions().await;
            println!("Total partitions: {}", partitions.len());

            for partition in &partitions {
                // Borrow the owner instead of cloning it — we only need text.
                let owner = partition.owner.as_deref().unwrap_or("None");
                let replicas = partition.replica_nodes.join(", ");

                println!("Partition {}: Owner={}, State={:?}, Replicas=[{}]",
                    partition.id, owner, partition.state, replicas);
            }

            // Demonstrate key -> partition routing for a few sample keys.
            for i in 0..5 {
                let key = format!("test_key_{}", i);
                let partition_id = pm.get_partition_for_key(&key);
                // The id was produced by the manager itself, so the lookup
                // failing would be a broken invariant — state that explicitly.
                let partition = pm.get_partition(partition_id).await
                    .expect("id from get_partition_for_key must resolve to a partition");

                println!("Key '{}' -> Partition {} (Owner: {})",
                    key, partition_id, partition.owner.as_deref().unwrap_or("None"));
            }
        } else {
            println!("Partition manager not initialized");
        }
    }

    // Keep the process alive.
    loop {
        tokio::time::sleep(Duration::from_secs(1)).await;
    }
}