use anyhow::Result;
use cluster_node_manager::{ClusterManager, Config};
use std::env;
use tracing::{info, Level};
use tracing_subscriber;

/// Distributed cluster example with multiple nodes.
///
/// Usage: `<binary> <node_id>`. Node 1 bootstraps a new cluster; every
/// other node joins the existing one. Port layout is derived from the
/// node ID: Raft/bind on `8000 + id`, HTTP API on `8080 + id`.
#[tokio::main]
async fn main() -> Result<()> {
    // Initialize logging at INFO level.
    tracing_subscriber::fmt().with_max_level(Level::INFO).init();

    // Get node ID from command line argument; print usage and exit on
    // missing argument.
    let args: Vec<String> = env::args().collect();
    if args.len() < 2 {
        eprintln!("Usage: {} <node_id>", args[0]);
        eprintln!("Example: {} 1", args[0]);
        std::process::exit(1);
    }

    // Parse the node ID. A bad value exits with the same usage message
    // instead of panicking (`expect` on user input produces an
    // unhelpful panic + backtrace).
    let node_id: u64 = match args[1].parse() {
        Ok(id) => id,
        Err(e) => {
            eprintln!("Invalid node ID '{}': {}", args[1], e);
            eprintln!("Usage: {} <node_id>", args[0]);
            std::process::exit(1);
        }
    };

    info!(
        "Starting distributed cluster example with node ID: {}",
        node_id
    );

    // Create configuration for this node; ports are offset by node ID
    // so multiple nodes can run on one host.
    let mut config = Config::default();
    config.node.id = node_id;
    config.node.bind_addr = format!("127.0.0.1:{}", 8000 + node_id);
    config.cluster.name = "distributed-cluster".to_string();
    config.api.bind_addr = format!("127.0.0.1:{}", 8080 + node_id);

    // Set cluster peers (all nodes in the cluster).
    // NOTE(review): for node IDs 1-3 this list includes this node's own
    // bind address — confirm ClusterManager tolerates self in peers.
    config.cluster.peers = vec![
        "127.0.0.1:8001".to_string(),
        "127.0.0.1:8002".to_string(),
        "127.0.0.1:8003".to_string(),
    ];

    // Create cluster manager
    let cluster_manager = ClusterManager::new(config).await?;

    // Initialize or join cluster based on node ID: node 1 is the
    // bootstrap node, all others join it.
    if node_id == 1 {
        info!("Node 1: Initializing new cluster");
        cluster_manager.initialize_cluster().await?;
    } else {
        info!("Node {}: Joining existing cluster", node_id);
        cluster_manager.join_cluster().await?;
    }

    // Start the cluster
    cluster_manager.start().await?;

    info!("Node {} started successfully!", node_id);
    info!("API server running on http://127.0.0.1:{}", 8080 + node_id);
    info!("Metrics available at http://127.0.0.1:9090/metrics");

    // Give the cluster a few seconds to elect a leader before adding
    // example managed nodes (only the leader may add them).
    tokio::time::sleep(std::time::Duration::from_secs(5)).await;

    if cluster_manager.is_leader().await {
        info!("This node is the leader, adding example managed nodes");

        use cluster_node_manager::cluster::{ManagedNode, NodeStatus};
        use std::collections::HashMap;

        for i in 1..=3 {
            let mut tags = HashMap::new();
            tags.insert("region".to_string(), "us-west".to_string());
            tags.insert("role".to_string(), "worker".to_string());

            let node = ManagedNode {
                id: format!("worker-{:03}", i),
                address: format!("192.168.1.{}:9000", 100 + i),
                tags,
                status: NodeStatus::Unknown,
                // `timestamp_millis()` is non-negative for any current
                // clock, so the u64 cast is safe in practice.
                last_seen: chrono::Utc::now().timestamp_millis() as u64,
                health_checks: Vec::new(),
            };

            // Best-effort: log failures but keep adding the remaining
            // example nodes.
            if let Err(e) = cluster_manager.add_managed_node(node).await {
                eprintln!("Failed to add managed node {}: {}", i, e);
            } else {
                info!("Added managed node worker-{:03}", i);
            }
        }
    }

    // Keep running until Ctrl+C, then shut down cleanly.
    tokio::signal::ctrl_c().await?;

    info!("Shutting down node {}", node_id);
    cluster_manager.shutdown().await?;

    Ok(())
}
