//! 负载均衡器核心实现
//! 
//! 提供统一的负载均衡接口，支持多种算法和配置

use std::sync::Arc;
use std::time::{Duration, Instant};
use std::collections::HashMap;
use anyhow::Result;
use tokio::sync::RwLock;
use tracing::{info, warn, error, debug};

use crate::load_balancer::config::{LoadBalancerConfig, LoadBalancingAlgorithm as AlgorithmType, BackendConfig};
use crate::load_balancer::algorithms::*;
use crate::load_balancer::health_check::{HealthChecker, BackendHealth};
use crate::load_balancer::metrics::LoadBalancerMetrics;

/// 负载均衡器
pub struct LoadBalancer {
    /// 配置
    config: LoadBalancerConfig,
    /// 负载均衡算法
    algorithm: Box<dyn LoadBalancingAlgorithm>,
    /// 后端服务器列表
    backends: Arc<RwLock<Vec<Backend>>>,
    /// 健康检查器
    health_checker: Arc<HealthChecker>,
    /// 指标收集器
    metrics: Arc<LoadBalancerMetrics>,
    /// 会话保持映射
    session_map: Arc<RwLock<HashMap<String, String>>>,
    /// 启动时间
    start_time: Instant,
}

/// 负载均衡器构建器
pub struct LoadBalancerBuilder {
    config: LoadBalancerConfig,
    health_checker: Option<Arc<HealthChecker>>,
    metrics: Option<Arc<LoadBalancerMetrics>>,
}

impl LoadBalancerBuilder {
    /// 创建新的构建器
    pub fn new(config: LoadBalancerConfig) -> Self {
        Self {
            config,
            health_checker: None,
            metrics: None,
        }
    }

    /// 设置健康检查器
    pub fn with_health_checker(mut self, health_checker: Arc<HealthChecker>) -> Self {
        self.health_checker = Some(health_checker);
        self
    }

    /// 设置指标收集器
    pub fn with_metrics(mut self, metrics: Arc<LoadBalancerMetrics>) -> Self {
        self.metrics = Some(metrics);
        self
    }

    /// 构建负载均衡器
    pub async fn build(self) -> Result<LoadBalancer> {
        let algorithm = create_algorithm(&self.config.algorithm);
        
        // 创建后端服务器列表
        let backends: Vec<Backend> = self.config.backends
            .iter()
            .map(|config| Backend::new(config.clone()))
            .collect();

        let backends = Arc::new(RwLock::new(backends));

        // 创建健康检查器
        let health_checker = self.health_checker.unwrap_or_else(|| {
            Arc::new(HealthChecker::new(
                self.config.health_check.clone(),
                backends.clone(),
            ))
        });

        // 创建指标收集器
        let metrics = self.metrics.unwrap_or_else(|| {
            Arc::new(LoadBalancerMetrics::new())
        });

        info!(
            "创建负载均衡器，算法: {}，后端数量: {}",
            algorithm.name(),
            self.config.backends.len()
        );

        Ok(LoadBalancer {
            config: self.config,
            algorithm,
            backends,
            health_checker,
            metrics,
            session_map: Arc::new(RwLock::new(HashMap::new())),
            start_time: Instant::now(),
        })
    }
}

impl LoadBalancer {
    /// 选择后端服务器
    pub async fn select_backend(&self, context: &SelectionContext) -> Result<Option<Arc<Backend>>> {
        self.metrics.increment_request_count().await;
        let start_time = Instant::now();

        // 检查会话保持
        if self.config.session_persistence.enabled {
            if let Some(backend) = self.check_session_persistence(context).await? {
                let selection_time = start_time.elapsed();
                self.metrics.record_selection_time(selection_time).await;
                return Ok(Some(Arc::new(backend)));
            }
        }

        // 使用算法选择后端
        let backends = self.backends.read().await;
        
        if let Some(_selected) = self.algorithm.select_backend(&backends[..], context).await {
            // TODO: 暂时简化，避免复杂的配置创建
            drop(backends); // 释放读锁
            warn!("后端选择功能暂未完全实现");
            Ok(None) // 暂时返回None，避免编译错误
        } else {
            warn!("没有可用的后端服务器");
            self.metrics.increment_error_count("no_backends_available").await;
            Ok(None)
        }
    }

    /// 检查会话保持
    async fn check_session_persistence(
        &self,
        context: &SelectionContext
    ) -> Result<Option<Backend>> {
        use crate::load_balancer::config::SessionPersistenceMethod;

        let session_key = match self.config.session_persistence.method {
            SessionPersistenceMethod::Cookie => {
                context.headers.get(
                    self.config.session_persistence.cookie_name.as_deref().unwrap_or("LB_SESSION")
                )
            }
            SessionPersistenceMethod::IpHash => {
                context.client_ip.as_ref()
            }
            SessionPersistenceMethod::CustomHash => {
                context.hash_key.as_ref().or(context.session_id.as_ref())
            }
            SessionPersistenceMethod::JwtToken => {
                context.headers.get("Authorization")
            }
        };

        if let Some(key) = session_key {
            let session_map = self.session_map.read().await;
            if let Some(backend_id) = session_map.get(key) {
                let backends = self.backends.read().await;
                if let Some(backend) = backends.iter().find(|b| &b.config.id == backend_id && b.is_healthy()) {
                    return Ok(Some(backend.clone()));
                }
            }
        }

        Ok(None)
    }

    /// 更新会话保持
    async fn update_session_persistence(
        &self,
        context: &SelectionContext,
        backend: &Backend,
    ) -> Result<()> {
        use crate::load_balancer::config::SessionPersistenceMethod;

        let session_key = match self.config.session_persistence.method {
            SessionPersistenceMethod::Cookie => {
                context.headers.get(
                    self.config.session_persistence.cookie_name.as_deref().unwrap_or("LB_SESSION")
                )
            }
            SessionPersistenceMethod::IpHash => {
                context.client_ip.as_ref()
            }
            SessionPersistenceMethod::CustomHash => {
                context.hash_key.as_ref().or(context.session_id.as_ref())
            }
            SessionPersistenceMethod::JwtToken => {
                context.headers.get("Authorization")
            }
        };

        if let Some(key) = session_key {
            let mut session_map = self.session_map.write().await;
            session_map.insert(key.clone(), backend.config.id.clone());
        }

        Ok(())
    }

    /// 启动负载均衡器
    pub async fn start(&self) -> Result<()> {
        info!("启动负载均衡器");

        // 启动健康检查
        if self.config.health_check.enabled {
            self.health_checker.start().await?;
        }

        // 启动指标收集
        self.metrics.start().await?;

        // 启动会话清理任务
        if self.config.session_persistence.enabled {
            self.start_session_cleanup().await;
        }

        info!("负载均衡器启动完成");
        Ok(())
    }

    /// 停止负载均衡器
    pub async fn stop(&self) -> Result<()> {
        info!("停止负载均衡器");

        // 停止健康检查
        self.health_checker.stop().await?;

        // 停止指标收集
        self.metrics.stop().await?;

        info!("负载均衡器已停止");
        Ok(())
    }

    /// 启动会话清理任务
    async fn start_session_cleanup(&self) {
        let session_map = self.session_map.clone();
        let timeout = self.config.session_persistence.timeout;

        tokio::spawn(async move {
            let mut interval = tokio::time::interval(Duration::from_secs(300)); // 每5分钟清理一次
            
            loop {
                interval.tick().await;
                
                let mut map = session_map.write().await;
                let now = Instant::now();
                
                // 简化的过期检查 - 实际实现中应该记录会话创建时间
                if map.len() > 10000 { // 如果会话过多，清理一部分
                    let keys_to_remove: Vec<_> = map.keys().take(1000).cloned().collect();
                    for key in keys_to_remove {
                        map.remove(&key);
                    }
                }
            }
        });
    }

    /// 添加后端服务器
    pub async fn add_backend(&self, backend_config: crate::load_balancer::config::BackendConfig) -> Result<()> {
        let backend = Backend::new(backend_config.clone());
        
        {
            let mut backends = self.backends.write().await;
            backends.push(backend);
        }

        // 更新健康检查
        self.health_checker.add_backend(backend_config.id.clone()).await;

        info!("添加后端服务器: {}", backend_config.id);
        Ok(())
    }

    /// 移除后端服务器
    pub async fn remove_backend(&self, backend_id: &str) -> Result<()> {
        {
            let mut backends = self.backends.write().await;
            backends.retain(|b| b.config.id != backend_id);
        }

        // 更新健康检查
        self.health_checker.remove_backend(backend_id).await;

        // 清理会话映射
        {
            let mut session_map = self.session_map.write().await;
            session_map.retain(|_, backend| backend != backend_id);
        }

        info!("移除后端服务器: {}", backend_id);
        Ok(())
    }

    /// 获取负载均衡器状态
    pub async fn get_status(&self) -> LoadBalancerStatus {
        let backends = self.backends.read().await;
        let health_status = self.health_checker.get_health_status().await;
        
        LoadBalancerStatus {
            algorithm: self.algorithm.name().to_string(),
            total_backends: backends.len(),
            healthy_backends: backends.iter().filter(|b| b.is_healthy()).count(),
            total_requests: self.metrics.get_total_requests().await,
            uptime: self.start_time.elapsed(),
            session_count: self.session_map.read().await.len(),
            health_status,
        }
    }

    /// 获取指标
    pub fn metrics(&self) -> Arc<LoadBalancerMetrics> {
        self.metrics.clone()
    }
}

/// 负载均衡器状态
#[derive(Debug, Clone)]
pub struct LoadBalancerStatus {
    /// 算法名称
    pub algorithm: String,
    /// 总后端数
    pub total_backends: usize,
    /// 健康后端数
    pub healthy_backends: usize,
    /// 总请求数
    pub total_requests: u64,
    /// 运行时间
    pub uptime: Duration,
    /// 会话数量
    pub session_count: usize,
    /// 健康状态
    pub health_status: Vec<BackendHealth>,
}

/// 创建负载均衡算法
fn create_algorithm(algorithm_type: &AlgorithmType) -> Box<dyn LoadBalancingAlgorithm> {
    match algorithm_type {
        AlgorithmType::RoundRobin => Box::new(RoundRobinAlgorithm::new()),
        AlgorithmType::WeightedRoundRobin => Box::new(WeightedRoundRobinAlgorithm::new()),
        AlgorithmType::LeastConnections => Box::new(LeastConnectionsAlgorithm::new()),
        AlgorithmType::WeightedLeastConnections => Box::new(WeightedLeastConnectionsAlgorithm::new()),
        AlgorithmType::IpHash => Box::new(IpHashAlgorithm::new()),
        AlgorithmType::Random => Box::new(RandomAlgorithm::new()),
        AlgorithmType::LeastResponseTime => Box::new(LeastResponseTimeAlgorithm::new()),
        AlgorithmType::ResourceAware => Box::new(ResourceAwareAlgorithm::new()),
        _ => {
            warn!("不支持的负载均衡算法，使用轮询算法");
            Box::new(RoundRobinAlgorithm::new())
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::load_balancer::config::{BackendConfig, ResourceCapacity};
    use std::net::SocketAddr;

    fn create_test_config() -> LoadBalancerConfig {
        let mut config = LoadBalancerConfig::default();
        
        for i in 1..=3 {
            let mut backend_config = BackendConfig::default();
            backend_config.id = format!("backend{}", i);
            backend_config.address = format!("127.0.0.1:{}", 8080 + i).parse::<SocketAddr>().unwrap();
            backend_config.capacity = ResourceCapacity::default();
            config.backends.push(backend_config);
        }
        
        config
    }

    #[tokio::test]
    async fn test_load_balancer_creation() {
        let config = create_test_config();
        let load_balancer = LoadBalancerBuilder::new(config)
            .build()
            .await
            .unwrap();
        
        assert_eq!(load_balancer.config.backends.len(), 3);
        assert_eq!(load_balancer.algorithm.name(), "round_robin");
    }

    #[tokio::test]
    async fn test_backend_selection() {
        let config = create_test_config();
        let load_balancer = LoadBalancerBuilder::new(config)
            .build()
            .await
            .unwrap();
        
        let context = SelectionContext::default();
        
        let backend1 = load_balancer.select_backend(&context).await.unwrap();
        let backend2 = load_balancer.select_backend(&context).await.unwrap();
        
        assert!(backend1.is_some());
        assert!(backend2.is_some());
        
        // 由于是轮询算法，两次选择应该选中不同的后端
        assert_ne!(backend1.unwrap().config.id, backend2.unwrap().config.id);
    }

    #[tokio::test]
    async fn test_add_remove_backend() {
        let config = create_test_config();
        let load_balancer = LoadBalancerBuilder::new(config)
            .build()
            .await
            .unwrap();
        
        // 添加新后端
        let mut new_backend = BackendConfig::default();
        new_backend.id = "backend4".to_string();
        new_backend.address = "127.0.0.1:8084".parse().unwrap();
        
        load_balancer.add_backend(new_backend).await.unwrap();
        
        let status = load_balancer.get_status().await;
        assert_eq!(status.total_backends, 4);
        
        // 移除后端
        load_balancer.remove_backend("backend4").await.unwrap();
        
        let status = load_balancer.get_status().await;
        assert_eq!(status.total_backends, 3);
    }
}