//! # OpenCloudOS Node Management Server
//!
//! The central server component of the OpenCloudOS node management system.
//! Handles command distribution, result collection, and agent management.
//!
//! ## Features
//! - Agent registration and heartbeat monitoring
//! - Command distribution with group-based routing
//! - Task result collection and persistence
//! - TLS/SSL encrypted communication support
//! - Token-based agent authentication
//! - PostgreSQL database integration
//! - RabbitMQ message queue integration
//!
//! ## Usage
//!
//! Start the server:
//! ```bash
//! node_server start [--enable-tls]
//! ```
//!
//! Send commands to agents:
//! ```bash
//! node_server command [--group GROUP] COMMAND
//! ```

use anyhow::Result;
use clap::{Parser, Subcommand};
use lapin::{
    options::*, types::FieldTable, BasicProperties, Channel, Connection, ConnectionProperties,
    Consumer,
};
use sha2::{Digest, Sha256};
use shared::protocol::{queues, AgentRegistration, HeartbeatMessage, ResultMessage, TaskMessage};
use sqlx::{postgres::PgPoolOptions, PgPool};
use std::sync::Arc;

/// Command-line interface for the node server
///
/// Parsed with clap's derive API; `main` dispatches on the selected
/// subcommand (start the daemon, or send a one-shot command to agents).
#[derive(Parser)]
#[command(name = "node_server")]
#[command(about = "OpenCloudOS 节点管理服务器")]
struct Cli {
    // Subcommand chosen on the command line. Plain `//` comments are used here
    // on purpose: clap turns `///` doc comments into CLI help text.
    #[command(subcommand)]
    command: Commands,
}

/// Available server commands
///
/// Note: field-level comments below use `//` instead of `///` because clap
/// would otherwise surface them as CLI help text, changing program output.
#[derive(Subcommand)]
enum Commands {
    /// Start the server daemon
    Start {
        // AMQP URL of the RabbitMQ broker; default targets a local dev broker.
        #[arg(long, default_value = "amqp://admin:admin123@127.0.0.1:5672")]
        rabbitmq_url: String,
        // PostgreSQL connection URL for the node_management database.
        #[arg(
            long,
            default_value = "postgres://admin:admin123@localhost:5432/node_management"
        )]
        database_url: String,
        // Enable TLS for broker connections using the certificate paths below.
        #[arg(long)]
        enable_tls: bool,
        // Server certificate, private key, and CA certificate paths.
        #[arg(long, default_value = "./certs/server.crt")]
        cert_path: String,
        #[arg(long, default_value = "./certs/server.key")]
        key_path: String,
        #[arg(long, default_value = "./certs/ca.crt")]
        ca_path: String,
    },
    /// Send commands to agents
    Command {
        // Optional target group; when omitted the task is routed to "default"
        // (see NodeServer::send_task).
        #[arg(short, long)]
        group: Option<String>,
        #[command(subcommand)]
        cmd: CommandType,
    },
}

/// Command types that can be sent to agents
///
/// Each variant maps to a `TaskMessage` constructor in `main`
/// (`new_get_hostname`, `new_run_script`, `new_receive_file`).
#[derive(Subcommand)]
enum CommandType {
    /// Get the hostname of target agents
    GetHostname,
    /// Execute a script on target agents
    RunScript {
        // Script text (or path, depending on agent interpretation — defined by
        // shared::protocol) to run on each matching agent.
        #[arg(short, long)]
        script: String,
        // Per-agent execution timeout in seconds.
        #[arg(short, long, default_value = "30")]
        timeout: u32,
    },
    /// Send a file to target agents
    SendFile {
        // File read on the server side and embedded into the task payload.
        #[arg(short, long)]
        local_path: String,
        // Destination path on each receiving agent.
        #[arg(short, long)]
        remote_path: String,
    },
}

/// Main server structure handling all operations
///
/// Contains database connection pool, RabbitMQ channel, and TLS configuration.
/// Manages agent registration, heartbeat monitoring, and command distribution.
/// Shared across background tasks via `Arc` (see `start_services`).
struct NodeServer {
    /// PostgreSQL connection pool for persistent storage
    db_pool: PgPool,
    /// RabbitMQ channel for message queue operations
    rabbitmq_channel: Channel,
    /// TLS configuration for secure connections.
    /// Currently only validated at startup, never read afterwards — hence the
    /// dead_code allowance; kept for future TLS-enabled AMQP connections.
    #[allow(dead_code)]
    tls_config: Option<shared::tls::TlsConfig>,
}

impl NodeServer {
    /// Create a new NodeServer instance
    ///
    /// Establishes connections to PostgreSQL database and RabbitMQ message queue.
    /// Sets up necessary exchanges, queues, and bindings for message routing.
    ///
    /// # Arguments
    /// * `rabbitmq_url` - RabbitMQ connection URL
    /// * `database_url` - PostgreSQL connection URL  
    /// * `tls_config` - Optional TLS configuration for secure connections
    async fn new(
        rabbitmq_url: &str,
        database_url: &str,
        tls_config: Option<shared::tls::TlsConfig>,
    ) -> Result<Self> {
        // Validate TLS configuration if provided
        if let Some(ref config) = tls_config {
            shared::tls::validate_cert_files(config)?;
            println!("🔒 TLS enabled for RabbitMQ connections");
        }

        // Connect to PostgreSQL database
        let db_pool = PgPoolOptions::new()
            .max_connections(10)
            .connect(database_url)
            .await?;

        // Connect to RabbitMQ (TLS support requires additional configuration)
        let conn = Connection::connect(rabbitmq_url, ConnectionProperties::default()).await?;
        let rabbitmq_channel = conn.create_channel().await?;

        // Declare exchanges and queues
        Self::setup_rabbitmq(&rabbitmq_channel).await?;

        Ok(Self {
            db_pool,
            rabbitmq_channel,
            tls_config,
        })
    }

    /// Setup RabbitMQ exchanges and queues for the server
    ///
    /// Declares all necessary RabbitMQ infrastructure:
    /// - Task Exchange: Topic exchange for routing commands to agent groups
    /// - Result Exchange: Direct exchange for collecting command results
    /// - Registration Queue: For agent registration messages
    /// - Heartbeat Queue: For agent heartbeat/health messages
    /// - Result Queue: For collecting task execution results
    ///
    /// # Arguments
    /// * `channel` - RabbitMQ channel to use for declarations
    ///
    /// # Errors
    /// Returns an error if any exchange/queue declaration or the final queue
    /// binding fails (e.g. RabbitMQ connectivity issues). Each failure carries
    /// a message naming the exact declaration that failed.
    async fn setup_rabbitmq(channel: &Channel) -> Result<()> {
        // Task exchange: topic routing enables group-based command
        // distribution (routing keys like "web" or "db.production").
        channel
            .exchange_declare(
                queues::TASK_EXCHANGE,
                lapin::ExchangeKind::Topic,
                ExchangeDeclareOptions::default(),
                FieldTable::default(),
            )
            .await
            .map_err(|e| anyhow::anyhow!("Failed to declare task exchange: {}", e))?;

        // Result exchange: direct routing for result collection.
        channel
            .exchange_declare(
                queues::RESULT_EXCHANGE,
                lapin::ExchangeKind::Direct,
                ExchangeDeclareOptions::default(),
                FieldTable::default(),
            )
            .await
            .map_err(|e| anyhow::anyhow!("Failed to declare result exchange: {}", e))?;

        // Agent registration queue.
        channel
            .queue_declare(
                queues::REGISTRATION_QUEUE,
                QueueDeclareOptions::default(),
                FieldTable::default(),
            )
            .await
            .map_err(|e| anyhow::anyhow!("Failed to declare registration queue: {}", e))?;

        // Heartbeat monitoring queue.
        channel
            .queue_declare(
                queues::HEARTBEAT_QUEUE,
                QueueDeclareOptions::default(),
                FieldTable::default(),
            )
            .await
            .map_err(|e| anyhow::anyhow!("Failed to declare heartbeat queue: {}", e))?;

        // Result collection queue.
        channel
            .queue_declare(
                queues::RESULT_QUEUE,
                QueueDeclareOptions::default(),
                FieldTable::default(),
            )
            .await
            .map_err(|e| anyhow::anyhow!("Failed to declare result queue: {}", e))?;

        // Bind the result queue so messages published to the result exchange
        // with the "result" routing key land in it.
        channel
            .queue_bind(
                queues::RESULT_QUEUE,
                queues::RESULT_EXCHANGE,
                "result",
                QueueBindOptions::default(),
                FieldTable::default(),
            )
            .await?;

        println!("✅ RabbitMQ exchanges and queues setup completed");
        Ok(())
    }

    /// Start all background services and block until Ctrl-C
    ///
    /// Consumes `self`, wraps it in an `Arc`, and spawns four tasks that
    /// share it: the registration consumer, the heartbeat consumer, the
    /// result consumer, and the periodic health check. The calling task then
    /// blocks on the Ctrl-C signal; returning from this function begins
    /// shutdown (spawned tasks are detached and end with the process).
    ///
    /// # Errors
    /// Returns an error if any `basic_consume` call fails or if waiting on
    /// the shutdown signal fails.
    async fn start_services(self) -> Result<()> {
        let server = Arc::new(self);

        // Start registration message consumer
        let registration_server = Arc::clone(&server);
        let registration_consumer = registration_server
            .rabbitmq_channel
            .basic_consume(
                queues::REGISTRATION_QUEUE,
                "registration_consumer",
                BasicConsumeOptions::default(),
                FieldTable::default(),
            )
            .await?;

        tokio::spawn(async move {
            Self::handle_registration_messages(registration_server, registration_consumer).await;
        });

        // Start heartbeat message consumer
        let heartbeat_server = Arc::clone(&server);
        let heartbeat_consumer = heartbeat_server
            .rabbitmq_channel
            .basic_consume(
                queues::HEARTBEAT_QUEUE,
                "heartbeat_consumer",
                BasicConsumeOptions::default(),
                FieldTable::default(),
            )
            .await?;

        tokio::spawn(async move {
            Self::handle_heartbeat_messages(heartbeat_server, heartbeat_consumer).await;
        });

        // Start result message consumer
        let result_server = Arc::clone(&server);
        let result_consumer = result_server
            .rabbitmq_channel
            .basic_consume(
                queues::RESULT_QUEUE,
                "result_consumer",
                BasicConsumeOptions::default(),
                FieldTable::default(),
            )
            .await?;

        tokio::spawn(async move {
            Self::handle_result_messages(result_server, result_consumer).await;
        });

        // Start health check periodic task (marks silent agents offline)
        let health_check_server = Arc::clone(&server);
        tokio::spawn(async move {
            Self::health_check_task(health_check_server).await;
        });

        println!("🚀 Node Server started successfully!");
        println!("📊 RabbitMQ Management UI: http://localhost:15672 (admin:admin123)");
        println!("🗄️  PostgreSQL: localhost:5432 (admin:admin123)");

        // Keep service running until the user interrupts with Ctrl-C
        tokio::signal::ctrl_c().await?;
        println!("🛑 Shutting down server...");
        Ok(())
    }

    /// Handle incoming agent registration messages from RabbitMQ queue
    ///
    /// This function processes agent registration messages continuously. When agents start,
    /// they send registration messages containing their identification and configuration.
    /// The server validates tokens and stores agent information in the database.
    ///
    /// # Arguments
    /// * `server` - Shared NodeServer instance for database and validation operations
    /// * `consumer` - RabbitMQ consumer stream for registration messages
    ///
    /// # Behavior
    /// - Runs in continuous loop processing registration messages
    /// - Logs payloads that fail to deserialize (previously dropped silently)
    /// - Acks every delivery, even failed ones, so poison messages are not
    ///   redelivered forever
    /// - Logs errors but continues processing other messages
    async fn handle_registration_messages(server: Arc<NodeServer>, mut consumer: Consumer) {
        use futures_lite::stream::StreamExt;

        while let Some(delivery) = consumer.next().await {
            match delivery {
                Ok(delivery) => {
                    match serde_json::from_slice::<AgentRegistration>(&delivery.data) {
                        Ok(registration) => {
                            if let Err(e) = server.handle_agent_registration(registration).await {
                                eprintln!("❌ Failed to handle agent registration: {}", e);
                            }
                        }
                        // Fix: malformed payloads were previously acked and
                        // dropped with no trace; log them for diagnosis.
                        Err(e) => {
                            eprintln!("❌ Failed to parse registration message: {}", e);
                        }
                    }
                    let _ = delivery.ack(BasicAckOptions::default()).await;
                }
                Err(e) => {
                    eprintln!("❌ Failed to consume registration message: {}", e);
                }
            }
        }
    }

    /// Handle incoming heartbeat messages from agents
    ///
    /// This function continuously processes heartbeat messages sent by active agents.
    /// Heartbeats are critical for monitoring agent health and determining which agents
    /// are available for task execution. Regular heartbeats keep agents marked as 'online'.
    ///
    /// # Arguments
    /// * `server` - Shared NodeServer instance for database operations
    /// * `consumer` - RabbitMQ consumer stream for heartbeat messages
    ///
    /// # Behavior
    /// - Updates agent last_heartbeat timestamp in database
    /// - Logs payloads that fail to deserialize (previously dropped silently)
    /// - Acks every delivery so malformed messages are not redelivered forever
    /// - Continues processing despite individual message errors
    async fn handle_heartbeat_messages(server: Arc<NodeServer>, mut consumer: Consumer) {
        use futures_lite::stream::StreamExt;

        while let Some(delivery) = consumer.next().await {
            match delivery {
                Ok(delivery) => {
                    match serde_json::from_slice::<HeartbeatMessage>(&delivery.data) {
                        Ok(heartbeat) => {
                            if let Err(e) = server.handle_heartbeat(heartbeat).await {
                                eprintln!("❌ Failed to handle heartbeat: {}", e);
                            }
                        }
                        // Fix: malformed payloads were previously acked and
                        // dropped with no trace; log them for diagnosis.
                        Err(e) => {
                            eprintln!("❌ Failed to parse heartbeat message: {}", e);
                        }
                    }
                    let _ = delivery.ack(BasicAckOptions::default()).await;
                }
                Err(e) => {
                    eprintln!("❌ Failed to consume heartbeat message: {}", e);
                }
            }
        }
    }

    /// Handle task execution results from agents
    ///
    /// This function processes task result messages sent by agents after they complete
    /// command execution. Results include success/failure status, command output,
    /// and execution metadata. The server stores results and updates task status.
    ///
    /// # Arguments
    /// * `server` - Shared NodeServer instance for database operations
    /// * `consumer` - RabbitMQ consumer stream for result messages
    ///
    /// # Behavior
    /// - Stores complete task results in database
    /// - Logs payloads that fail to deserialize (previously dropped silently)
    /// - Acks every delivery so malformed messages are not redelivered forever
    async fn handle_result_messages(server: Arc<NodeServer>, mut consumer: Consumer) {
        use futures_lite::stream::StreamExt;

        while let Some(delivery) = consumer.next().await {
            match delivery {
                Ok(delivery) => {
                    match serde_json::from_slice::<ResultMessage>(&delivery.data) {
                        Ok(result) => {
                            if let Err(e) = server.handle_task_result(result).await {
                                eprintln!("❌ Failed to handle task result: {}", e);
                            }
                        }
                        // Fix: malformed payloads were previously acked and
                        // dropped with no trace; log them for diagnosis.
                        Err(e) => {
                            eprintln!("❌ Failed to parse result message: {}", e);
                        }
                    }
                    let _ = delivery.ack(BasicAckOptions::default()).await;
                }
                Err(e) => {
                    eprintln!("❌ Failed to consume result message: {}", e);
                }
            }
        }
    }

    /// Process agent registration request and store in database
    ///
    /// Validates the agent's authentication token and upserts the agent row
    /// in PostgreSQL, marking it online with a fresh heartbeat timestamp.
    ///
    /// # Arguments
    /// * `registration` - Agent registration data including ID, hostname, groups, token, OS info
    ///
    /// # Returns
    /// `Result<()>` - Ok if registration successful, Err if validation or storage fails
    ///
    /// # Database Operations
    /// - Uses UPSERT (`ON CONFLICT ... DO UPDATE`) to handle duplicate
    ///   registrations gracefully, refreshing hostname, groups, token hash,
    ///   OS info, status, and heartbeat timestamp
    ///
    /// # Security
    /// - Token validation prevents unauthorized agent registration
    /// - Only the SHA-256 hash of the token is stored, never the plaintext
    async fn handle_agent_registration(&self, registration: AgentRegistration) -> Result<()> {
        // Reject agents presenting an unknown token before touching the database.
        if !self.validate_token(&registration.token) {
            println!(
                "❌ Invalid token for agent {} ({})",
                registration.agent_id, registration.hostname
            );
            return Err(anyhow::anyhow!("Invalid token"));
        }

        println!(
            "📝 Agent registration: {} ({})",
            registration.agent_id, registration.hostname
        );

        // Calculate token hash for storage (never store the plaintext token).
        let token_hash = self.hash_token(&registration.token);

        // Insert or update agent information in database.
        // Fix: the conflict branch now also refreshes os_info ($5); previously
        // a re-registering agent (e.g. after an OS upgrade) kept stale OS info.
        match sqlx::query(
            "INSERT INTO agents (agent_id, hostname, groups, token_hash, os_info, status, last_heartbeat) 
             VALUES ($1, $2, $3, $4, $5, 'online', NOW())
             ON CONFLICT (agent_id) 
             DO UPDATE SET hostname = $2, groups = $3, token_hash = $4, os_info = $5, status = 'online', last_heartbeat = NOW(), updated_at = NOW()")
        .bind(&registration.agent_id)
        .bind(&registration.hostname)
        .bind(&registration.groups)
        .bind(&token_hash)
        .bind(&registration.os_info)
        .execute(&self.db_pool)
        .await {
            Ok(_) => {
                println!("✅ Agent {} registered successfully", registration.agent_id);
                Ok(())
            }
            Err(e) => {
                eprintln!("❌ Failed to store agent registration in database: {}", e);
                Err(anyhow::anyhow!("Database registration failed: {}", e))
            }
        }
    }

    /// Process agent heartbeat to maintain health status
    ///
    /// Updates the agent's `last_heartbeat` timestamp and marks it 'online'.
    /// Regular heartbeats are what keep an agent eligible for task delivery;
    /// the periodic health check marks silent agents offline.
    ///
    /// # Arguments
    /// * `heartbeat` - Heartbeat message containing agent ID and timestamp
    ///
    /// # Returns
    /// `Result<()>` - Ok if the heartbeat was recorded, Err on database failure
    async fn handle_heartbeat(&self, heartbeat: HeartbeatMessage) -> Result<()> {
        // Refresh the agent's heartbeat timestamp and force status to online.
        let update = sqlx::query(
            "UPDATE agents SET last_heartbeat = $1, status = 'online' WHERE agent_id = $2",
        )
        .bind(heartbeat.timestamp)
        .bind(&heartbeat.agent_id)
        .execute(&self.db_pool)
        .await;

        if let Err(e) = update {
            eprintln!("❌ Failed to update agent heartbeat in database: {}", e);
            return Err(anyhow::anyhow!("Failed to process heartbeat: {}", e));
        }

        // Best-effort debug record: failures here are deliberately ignored
        // because heartbeat history is not critical to correctness.
        let _ = sqlx::query("INSERT INTO heartbeats (agent_id, timestamp) VALUES ($1, $2)")
            .bind(&heartbeat.agent_id)
            .bind(heartbeat.timestamp)
            .execute(&self.db_pool)
            .await;

        Ok(())
    }

    /// Store task execution results and update task status
    ///
    /// Persists a result reported by an agent (status, output, timestamps)
    /// into `task_results`, then marks the originating task 'completed'.
    ///
    /// # Arguments
    /// * `result` - Task result message containing command ID, agent ID, status, output, timestamp
    ///
    /// # Returns
    /// `Result<()>` - Ok if both writes succeed, Err on the first database failure
    async fn handle_task_result(&self, result: ResultMessage) -> Result<()> {
        println!(
            "📥 Task result: {} from {}: {}",
            result.command_id, result.agent_id, result.status
        );

        // Persist the raw result row first; bail out early if that fails.
        let insert = sqlx::query(
            "INSERT INTO task_results (command_id, agent_id, status, result, completed_at) 
             VALUES ($1, $2, $3, $4, $5)",
        )
        .bind(&result.command_id)
        .bind(&result.agent_id)
        .bind(&result.status)
        .bind(&result.result)
        .bind(result.completed_at)
        .execute(&self.db_pool)
        .await;

        if let Err(e) = insert {
            eprintln!("❌ Failed to store task result: {}", e);
            return Err(anyhow::anyhow!("Failed to store task result: {}", e));
        }

        // Flip the originating task to 'completed' and stamp the finish time.
        let update = sqlx::query(
            "UPDATE tasks SET status = 'completed', completed_at = $1 WHERE command_id = $2",
        )
        .bind(result.completed_at)
        .bind(&result.command_id)
        .execute(&self.db_pool)
        .await;

        match update {
            Ok(_) => {
                println!("✅ Task result stored for command {}", result.command_id);
                Ok(())
            }
            Err(e) => {
                eprintln!("❌ Failed to update task status: {}", e);
                Err(anyhow::anyhow!("Failed to update task status: {}", e))
            }
        }
    }

    /// Periodic health check task that runs every 30 seconds
    ///
    /// Background loop that repeatedly invokes `check_agent_health` to mark
    /// agents with stale heartbeats as offline. Never returns; errors are
    /// logged and the loop continues.
    ///
    /// # Arguments
    /// * `server` - Shared NodeServer instance for database operations
    async fn health_check_task(server: Arc<NodeServer>) {
        use tokio::time::{interval, Duration};

        // Tick every 30 seconds; the first tick fires immediately per tokio's
        // default interval behavior, then every 30s thereafter.
        let mut ticker = interval(Duration::from_secs(30));
        loop {
            ticker.tick().await;
            if let Err(e) = server.check_agent_health().await {
                eprintln!("❌ Health check failed: {}", e);
            }
        }
    }

    /// Check agent health and mark unresponsive agents as offline
    ///
    /// Flips any agent still marked 'online' whose last heartbeat is older
    /// than 3 minutes to 'offline', and logs each newly offline agent.
    /// Agents recover automatically once they resume sending heartbeats.
    ///
    /// # Returns
    /// `Result<()>` - Ok if the check completed, Err on database failure
    async fn check_agent_health(&self) -> Result<()> {
        // Anything that last checked in before this cutoff is considered dead.
        let cutoff = chrono::Utc::now() - chrono::Duration::minutes(3);

        let outcome = sqlx::query_as::<_, (String,)>(
            "UPDATE agents SET status = 'offline' 
             WHERE last_heartbeat < $1 AND status = 'online'
             RETURNING agent_id",
        )
        .bind(cutoff)
        .fetch_all(&self.db_pool)
        .await;

        match outcome {
            Err(e) => {
                eprintln!("❌ Failed to check agent health: {}", e);
                Err(anyhow::anyhow!("Health check failed: {}", e))
            }
            Ok(newly_offline) => {
                // RETURNING gives us exactly the agents flipped by this pass.
                for (agent_id,) in newly_offline.into_iter() {
                    println!(
                        "⚠️  Agent {} marked as offline due to heartbeat timeout",
                        agent_id
                    );
                }
                Ok(())
            }
        }
    }

    /// Send task to agents via RabbitMQ routing
    ///
    /// Distributes a task using group-based routing through the RabbitMQ
    /// topic exchange. The task is stored with 'pending' status for tracking,
    /// then published; the publish is awaited for broker confirmation.
    ///
    /// # Arguments
    /// * `task` - Task message containing command details and target group
    ///
    /// # Returns
    /// `Result<()>` - Ok if stored, published, and confirmed; Err otherwise
    ///
    /// # Routing Logic
    /// - Uses target_group as routing key, defaults to "default" if none specified
    /// - Topic exchange allows flexible routing patterns (e.g., "web.*", "db.production")
    ///
    /// # Message Flow
    /// 1. Task stored in database with pending status
    /// 2. Task published to RabbitMQ topic exchange
    /// 3. Agents subscribed to matching routing keys receive task
    /// 4. Agents execute command and send results back
    async fn send_task(&self, task: TaskMessage) -> Result<()> {
        let routing_key = task.target_group.as_deref().unwrap_or("default");

        // Record the task as pending so later results can be correlated.
        // (Idiom fix: the original four-level match pyramid is flattened into
        // sequential early returns with identical messages and outcomes.)
        if let Err(e) = sqlx::query(
            "INSERT INTO tasks (command_id, command_type, payload, target_group, status) 
             VALUES ($1, $2, $3, $4, 'pending')",
        )
        .bind(&task.command_id)
        .bind(&task.command_type)
        .bind(&task.payload)
        .bind(&task.target_group)
        .execute(&self.db_pool)
        .await
        {
            eprintln!("❌ Failed to store task in database: {}", e);
            return Err(anyhow::anyhow!("Failed to store task: {}", e));
        }

        // Serialize the task for the wire.
        let payload = serde_json::to_vec(&task)
            .map_err(|e| anyhow::anyhow!("Failed to serialize task: {}", e))?;

        // Publish to the topic exchange under the group routing key.
        let confirm = self
            .rabbitmq_channel
            .basic_publish(
                queues::TASK_EXCHANGE,
                routing_key,
                BasicPublishOptions::default(),
                &payload,
                BasicProperties::default(),
            )
            .await
            .map_err(|e| {
                eprintln!("❌ Failed to publish task to RabbitMQ: {}", e);
                anyhow::anyhow!("Failed to publish task: {}", e)
            })?;

        // Wait for broker confirmation before reporting success.
        confirm.await.map_err(|e| {
            eprintln!("❌ Failed to confirm task publication: {}", e);
            anyhow::anyhow!("Task publication not confirmed: {}", e)
        })?;

        println!("📤 Task {} sent to group: {}", task.command_id, routing_key);
        Ok(())
    }

    /// Validate agent authentication token
    ///
    /// Checks the provided token against the server's allow-list.
    ///
    /// # Arguments
    /// * `token` - Agent-provided authentication token to validate
    ///
    /// # Returns
    /// `bool` - true if token is valid, false otherwise
    ///
    /// # Security Notes
    /// - Hardcoded tokens are for demonstration only; production systems
    ///   should use database/config-backed validation with rotation, and
    ///   transmit tokens over TLS only
    fn validate_token(&self, token: &str) -> bool {
        // Allow-list of accepted tokens (demo configuration).
        const VALID_TOKENS: [&str; 3] = [
            "secure_token_123",
            "admin_token_456",
            "production_token_789",
        ];
        VALID_TOKENS.iter().any(|candidate| *candidate == token)
    }

    /// Calculate SHA-256 hash of authentication token
    ///
    /// Produces the lowercase hexadecimal SHA-256 digest of the token for
    /// database storage, so the plaintext is never persisted.
    ///
    /// # Arguments
    /// * `token` - Plain text token to hash
    ///
    /// # Returns
    /// `String` - 64-character lowercase hex representation of the SHA-256 hash
    fn hash_token(&self, token: &str) -> String {
        use std::fmt::Write;

        // One-shot digest, then render each byte as two lowercase hex digits.
        let digest = Sha256::digest(token.as_bytes());
        let mut hex = String::with_capacity(digest.len() * 2);
        for byte in digest.iter() {
            // Writing to a String cannot fail; ignore the fmt::Result.
            let _ = write!(hex, "{:02x}", byte);
        }
        hex
    }
}

#[tokio::main]
async fn main() -> Result<()> {
    let cli = Cli::parse();

    match cli.command {
        // Long-running daemon mode: connect to both backends and serve.
        Commands::Start {
            rabbitmq_url,
            database_url,
            enable_tls,
            cert_path,
            key_path,
            ca_path,
        } => {
            // Create TLS configuration if enabled
            let tls_config = if enable_tls {
                Some(
                    shared::tls::TlsConfig::with_certs(cert_path, key_path, Some(ca_path))
                        .disable_hostname_verification(),
                ) // Allow insecure TLS for development
            } else {
                None
            };

            let server = NodeServer::new(&rabbitmq_url, &database_url, tls_config).await?;
            server.start_services().await?;
        }
        // One-shot client mode: build a task from the subcommand and publish it.
        Commands::Command { group, cmd } => {
            // Connect to RabbitMQ and send command.
            // NOTE(review): these URLs are hardcoded here and duplicate the
            // `start` defaults; changed broker/db locations require a rebuild.
            let rabbitmq_url = "amqp://admin:admin123@127.0.0.1:5672";
            let database_url = "postgres://admin:admin123@localhost:5432/node_management";
            let server = NodeServer::new(rabbitmq_url, database_url, None).await?;

            // Map each CLI subcommand to its TaskMessage constructor.
            let task = match cmd {
                CommandType::GetHostname => TaskMessage::new_get_hostname(group),
                CommandType::RunScript { script, timeout } => {
                    TaskMessage::new_run_script(&script, timeout, group)
                }
                CommandType::SendFile {
                    local_path,
                    remote_path,
                } => {
                    // File content is read on this machine and shipped inline
                    // in the task payload.
                    let file_content = std::fs::read(&local_path)?;
                    TaskMessage::new_receive_file(&remote_path, &file_content, group)
                }
            };

            server.send_task(task).await?;
            println!("✅ Command sent successfully");
        }
    }

    Ok(())
}

#[cfg(test)]
mod tests {
    //! Unit tests for node_server functionality
    //!
    //! These tests exercise the pure logic (hashing, token allow-list,
    //! group filtering) without touching RabbitMQ or PostgreSQL.

    use super::*;
    use sha2::{Digest, Sha256};

    // Verifies that the hashing scheme used by NodeServer::hash_token is
    // deterministic and yields a 64-char hex digest.
    #[test]
    fn token_hashing() {
        let token = "secure_token_123";

        // Test token hashing logic (extracted from NodeServer)
        let mut hasher = Sha256::new();
        hasher.update(token.as_bytes());
        let hash = format!("{:x}", hasher.finalize());

        // Verify hash consistency
        let mut hasher2 = Sha256::new();
        hasher2.update(token.as_bytes());
        let hash2 = format!("{:x}", hasher2.finalize());

        assert_eq!(hash, hash2);
        assert!(!hash.is_empty());
        assert_eq!(hash.len(), 64); // SHA256 produces 64 hex characters
    }

    // Mirrors the allow-list check in NodeServer::validate_token.
    #[test]
    fn token_validation_logic() {
        let valid_tokens = vec![
            "secure_token_123",
            "admin_token_456",
            "production_token_789",
        ];

        // Test the validation logic used in NodeServer
        for token in &valid_tokens {
            assert!(valid_tokens.contains(token));
            assert!(!token.is_empty());
            assert!(token.len() > 5);
        }

        // Test invalid tokens
        let invalid_tokens = vec!["", "short", "invalid_token"];
        for invalid_token in &invalid_tokens {
            if invalid_token.is_empty() || invalid_token.len() <= 5 {
                assert!(!valid_tokens.contains(invalid_token));
            }
        }
    }

    // Exercises group-membership filtering for agents in multiple groups,
    // matching the routing semantics of the task topic exchange.
    #[test]
    fn group_membership_filtering() {
        // Test group filtering logic for multi-group agents
        let agents = vec![
            AgentRegistration::new(
                "web-agent".to_string(),
                "web-node".to_string(),
                vec!["web".to_string()],
                "web_token".to_string(),
                "Ubuntu".to_string(),
            ),
            AgentRegistration::new(
                "multi-agent".to_string(),
                "multi-node".to_string(),
                vec!["web".to_string(), "database".to_string()],
                "multi_token".to_string(),
                "RHEL".to_string(),
            ),
        ];

        // Test web group filtering
        let web_agents: Vec<&AgentRegistration> = agents
            .iter()
            .filter(|a| a.groups.contains(&"web".to_string()))
            .collect();
        assert_eq!(web_agents.len(), 2);

        // Test database group filtering
        let db_agents: Vec<&AgentRegistration> = agents
            .iter()
            .filter(|a| a.groups.contains(&"database".to_string()))
            .collect();
        assert_eq!(db_agents.len(), 1);
        assert_eq!(db_agents[0].agent_id, "multi-agent");
    }
}
