use std::sync::Arc;
use std::time::Duration;
use nacos_core::{ClusterManager, ServiceStorage, Result, NacosError, ServiceInstance};
use tracing::{info, warn};

/// Background task that periodically scans the service registry and removes
/// ephemeral instances whose heartbeat has not been renewed within
/// `heartbeat_timeout`. Only runs its sweep when this node is the cluster
/// leader (checked via `cluster_manager`).
pub struct InstanceHealthChecker {
    // Storage backend used to scan instances and deregister expired ones.
    service_storage: Arc<dyn ServiceStorage>,
    // Consulted each sweep to determine whether this node is the leader.
    cluster_manager: Arc<dyn ClusterManager>,
    // Maximum allowed age of an ephemeral instance's last heartbeat.
    heartbeat_timeout: Duration,
    // Delay between successive health-check sweeps in `run`.
    check_interval: Duration,
}

impl InstanceHealthChecker {
    /// Creates a new health checker.
    ///
    /// * `heartbeat_timeout` — how long an ephemeral instance may go without a
    ///   heartbeat before it is considered expired.
    /// * `check_interval` — how often the background sweep in [`run`] executes.
    pub fn new(
        service_storage: Arc<dyn ServiceStorage>,
        cluster_manager: Arc<dyn ClusterManager>,
        heartbeat_timeout: Duration,
        check_interval: Duration,
    ) -> Self {
        Self {
            service_storage,
            cluster_manager,
            heartbeat_timeout,
            check_interval,
        }
    }

    /// Runs the periodic health-check loop forever. A failed sweep is logged
    /// and does not terminate the loop; the next tick retries.
    pub async fn run(&self) {
        loop {
            tokio::time::sleep(self.check_interval).await;
            if let Err(e) = self.check_and_remove_expired_instances().await {
                warn!("Error during instance health check: {}", e);
            }
        }
    }

    /// Performs one sweep: if this node is the cluster leader, scan all stored
    /// instances and deregister every ephemeral instance whose last heartbeat
    /// is older than `heartbeat_timeout`.
    ///
    /// Returns `Err` only for failures that invalidate the whole sweep
    /// (leadership query, storage scan). Per-instance problems — undecodable
    /// entries, clock-skewed timestamps, failed deregistrations — are skipped
    /// or logged so one bad instance cannot block removal of the rest.
    async fn check_and_remove_expired_instances(&self) -> Result<()> {
        if !self.cluster_manager.is_leader().await? {
            info!("Not the leader, skipping instance health check.");
            return Ok(());
        }

        info!("Running instance health check as leader.");
        // Note: In a real, large-scale system, scanning all instances like this is inefficient.
        // A more optimized approach would involve time-wheel algorithms or other strategies.
        let all_instances_bytes = self.service_storage.scan("service:").await?;

        // Take "now" once for the whole sweep instead of per instance; the
        // comparison is against a fixed timeout, so per-entry precision is
        // unnecessary.
        let now = chrono::Utc::now();

        for (_, instance_bytes) in all_instances_bytes {
            // Skip entries that fail to deserialize rather than silently
            // dropping out of the `if let` — behavior is the same, intent is
            // explicit.
            let instance = match serde_json::from_slice::<ServiceInstance>(&instance_bytes) {
                Ok(i) => i,
                Err(_) => continue,
            };

            // Persistent (non-ephemeral) instances are never expired by heartbeat.
            if !instance.ephemeral {
                continue;
            }

            // BUGFIX: `to_std()` errors on a negative duration. Previously a
            // single heartbeat timestamped in the future (clock skew) aborted
            // the entire sweep via `?`. A future-dated heartbeat is simply
            // "not expired" — skip it and keep sweeping.
            let duration_since_heartbeat = match now
                .signed_duration_since(instance.last_heartbeat_time)
                .to_std()
            {
                Ok(d) => d,
                Err(_) => continue,
            };

            if duration_since_heartbeat > self.heartbeat_timeout {
                warn!("Instance {} of service {} has expired. Removing.", instance.instance_id, instance.service_name);
                // BUGFIX: previously a single failed deregistration aborted
                // the whole sweep via `?`, blocking removal of every remaining
                // expired instance. Log and continue; the next sweep retries.
                if let Err(e) = self
                    .service_storage
                    .deregister_instance(
                        &instance.namespace,
                        &instance.group_name,
                        &instance.service_name,
                        &instance.instance_id,
                    )
                    .await
                {
                    warn!(
                        "Failed to deregister expired instance {} of service {}: {}",
                        instance.instance_id, instance.service_name, e
                    );
                }
            }
        }
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use nacos_core::{
        ClusterManager, Result, ServiceInstance, ServiceStorage, NacosError, NodeRole, ClusterNode, 
    };
    use nacos_storage::memory::MemoryServiceStorage;
    use std::sync::Arc;
    use std::time::Duration;
    use async_trait::async_trait;

    /// Minimal cluster manager whose leadership answer is fixed at
    /// construction time; every other operation is a no-op.
    #[derive(Debug, Default)]
    struct MockClusterManager {
        is_leader: bool,
    }

    #[async_trait]
    impl ClusterManager for MockClusterManager {
        async fn is_leader(&self) -> Result<bool> { Ok(self.is_leader) }
        async fn get_cluster_nodes(&self) -> Result<Vec<ClusterNode>> { Ok(Vec::new()) }
        async fn get_current_node(&self) -> Result<ClusterNode> { Err(NacosError::Unsupported) }
        async fn get_leader(&self) -> Result<Option<ClusterNode>> { Ok(None) }
        async fn elect_leader(&self) -> Result<()> { Ok(()) }
        async fn add_node(&self, _node: &ClusterNode) -> Result<()> { Ok(()) }
        async fn remove_node(&self, _node_id: &str) -> Result<()> { Ok(()) }
        async fn update_node_state(&self, _node_id: &str, _role: NodeRole) -> Result<()> { Ok(()) }
        async fn send_heartbeat(&self, _target_node: &str) -> Result<()> { Ok(()) }
        async fn sync_data(&self, _data: &[u8]) -> Result<()> { Ok(()) }
    }

    /// End-to-end check: an ephemeral instance registered against in-memory
    /// storage must disappear once its heartbeat ages past the timeout while
    /// the checker runs as leader.
    #[tokio::test]
    async fn test_health_checker_removes_expired_instance() {
        let timeout = Duration::from_millis(100);
        let interval = Duration::from_millis(10);
        let store = Arc::new(MemoryServiceStorage::new());
        let cluster = Arc::new(MockClusterManager { is_leader: true });

        let checker =
            InstanceHealthChecker::new(store.clone(), cluster.clone(), timeout, interval);

        // Run the checker loop on a background task for the test's duration.
        tokio::spawn(async move { checker.run().await });

        // Register a fresh ephemeral instance in the "public" namespace.
        let mut inst = ServiceInstance::new(
            "test_service".to_string(),
            "test_group".to_string(),
            "127.0.0.1".to_string(),
            8080,
        );
        inst.namespace = "public".to_string();
        inst.ephemeral = true;
        store.register_instance(&inst).await.unwrap();

        // Immediately after registration the instance must be present.
        let before = store
            .get_instance("public", "test_group", "test_service", &inst.instance_id)
            .await
            .unwrap();
        assert!(before.is_some(), "Instance should exist immediately after registration");

        // Sleep past the timeout plus several check intervals so the sweep
        // has had a chance to observe the expired heartbeat.
        tokio::time::sleep(timeout + interval * 5).await;

        // The checker must have deregistered the expired instance by now.
        let after = store
            .get_instance("public", "test_group", "test_service", &inst.instance_id)
            .await
            .unwrap();
        assert!(after.is_none(), "Instance should have been removed after timeout");
    }
}