use crate::state_machine::StateMachine;
use async_trait::async_trait;
use rustcloud_core::{CircuitBreaker, ServiceResult, CircuitBreakerState, CircuitBreakerMetrics, Response, ServiceError, CallStats};
use std::collections::{HashMap, HashSet};
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::sync::RwLock;
use serde::{Deserialize, Serialize};

/// Circuit-breaking strategy: which signal trips the breaker open.
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
pub enum CircuitBreakerStrategy {
    /// Trip on the absolute number of failed requests.
    FailureCount,
    /// Trip on the failure rate (failed / total requests).
    FailureRate,
    /// Trip on response time (count of slow requests exceeding a threshold).
    ResponseTime,
    /// Trip on the number of concurrent in-flight requests.
    ConcurrentRequests,
    /// Combined strategy: trips when any of failure-count, failure-rate, or
    /// slow-request conditions is met (see `AdvancedCircuitBreaker::should_trip`).
    Combined,
}

/// Filter deciding which error types count toward circuit-breaker statistics.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ExceptionFilter {
    /// Error type names that ARE counted toward breaker statistics.
    pub counted_exceptions: HashSet<String>,
    /// Error type names that are ignored (never counted). Takes precedence
    /// over `counted_exceptions`.
    pub ignored_exceptions: HashSet<String>,
    /// Fallback policy for error types in neither set: count them if `true`.
    pub count_all_by_default: bool,
}

impl Default for ExceptionFilter {
    fn default() -> Self {
        let mut counted = HashSet::new();
        counted.insert("NetworkError".to_string());
        counted.insert("TimeoutError".to_string());
        counted.insert("ConnectionError".to_string());
        
        Self {
            counted_exceptions: counted,
            ignored_exceptions: HashSet::new(),
            count_all_by_default: false,
        }
    }
}

impl ExceptionFilter {
    /// Returns `true` when `error` should be counted toward circuit-breaker
    /// statistics, per the ignore/count lists and the default policy.
    pub fn should_count_exception(&self, error: &ServiceError) -> bool {
        // Map the error to a type name. Errors raised because the breaker is
        // already open are never counted (they would self-reinforce).
        let error_type = match error {
            ServiceError::CircuitBreakerOpen(_) => return false,
            ServiceError::NetworkError(_) => "NetworkError",
            ServiceError::TimeoutError(_) => "TimeoutError",
            ServiceError::LoadBalancerError(_) => "LoadBalancerError",
            ServiceError::SerializationError(_) => "SerializationError",
            ServiceError::ValidationError(_) => "ValidationError",
            ServiceError::ServiceNotFound(_) => "ServiceNotFound",
            ServiceError::NoAvailableInstances(_) => "NoAvailableInstances",
            ServiceError::ConfigurationError(_) => "ConfigurationError",
            ServiceError::RegistryError(_) => "RegistryError",
            ServiceError::MetricsError(_) => "MetricsError",
            ServiceError::NotImplemented(_) => "NotImplemented",
            ServiceError::AlertingError(_) => "AlertingError",
            ServiceError::TracingError(_) => "TracingError",
            ServiceError::ConfigError(_) => "ConfigError",
            // Any variant added in the future maps to a generic bucket.
            _ => "Other",
        };

        // Precedence: ignore list > count list > default policy.
        if self.ignored_exceptions.contains(error_type) {
            false
        } else if self.counted_exceptions.contains(error_type) {
            true
        } else {
            self.count_all_by_default
        }
    }
}

/// Progressive-recovery configuration: in the half-open state, only a
/// percentage of traffic is admitted, growing with each success.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ProgressiveRecoveryConfig {
    /// Whether progressive recovery is enabled.
    pub enabled: bool,
    /// Initial percentage of traffic admitted when recovery begins.
    pub initial_recovery_percentage: f64,
    /// Percentage added after each counted success.
    pub success_increment_percentage: f64,
    /// Upper bound on the admitted-traffic percentage.
    pub max_recovery_percentage: f64,
    /// Whether a failure resets the percentage back to the initial value.
    pub reset_on_failure: bool,
}

impl Default for ProgressiveRecoveryConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            initial_recovery_percentage: 10.0,
            success_increment_percentage: 10.0,
            max_recovery_percentage: 100.0,
            reset_on_failure: true,
        }
    }
}

/// Full configuration for the advanced circuit breaker.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct AdvancedCircuitBreakerConfig {
    /// Master switch; when `false`, calls pass straight through.
    pub enabled: bool,
    
    /// Which strategy decides when the breaker trips.
    pub strategy: CircuitBreakerStrategy,
    
    /// FailureCount strategy: trip after this many failed requests.
    pub failure_threshold: u32,
    
    /// FailureRate strategy settings.
    pub failure_rate_threshold: f64,     // failure-rate threshold (0.0-1.0)
    pub min_request_threshold: u32,      // minimum requests before the rate is evaluated
    
    /// ResponseTime strategy settings.
    pub response_time_threshold: Duration, // a request slower than this counts as "slow"
    pub slow_request_threshold: u32,       // trip after this many slow requests
    
    /// ConcurrentRequests strategy setting.
    pub max_concurrent_requests: u32,     // maximum in-flight requests
    
    /// Recovery settings.
    pub recovery_timeout: Duration,       // wait time before Open may move to HalfOpen
    pub half_open_success_threshold: u32, // successes needed in HalfOpen to close
    pub half_open_max_calls: u32,        // max probe calls while HalfOpen (non-progressive mode)
    
    /// Decides which error types are counted toward the statistics.
    pub exception_filter: ExceptionFilter,
    
    /// Progressive (percentage-based) recovery settings.
    pub progressive_recovery: ProgressiveRecoveryConfig,
    
    /// Fallback settings.
    pub fallback_enabled: bool,          // whether fallback is enabled
    // NOTE(review): documented as the fallback-execution timeout, but
    // `call_with_fallback` applies it to the *operation* — confirm intent.
    pub fallback_timeout: Duration,
}

impl Default for AdvancedCircuitBreakerConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            strategy: CircuitBreakerStrategy::Combined,
            failure_threshold: 5,
            failure_rate_threshold: 0.5,
            min_request_threshold: 10,
            response_time_threshold: Duration::from_millis(1000),
            slow_request_threshold: 5,
            max_concurrent_requests: 100,
            recovery_timeout: Duration::from_secs(30),
            half_open_success_threshold: 2,
            half_open_max_calls: 10,
            exception_filter: ExceptionFilter::default(),
            progressive_recovery: ProgressiveRecoveryConfig::default(),
            fallback_enabled: true,
            fallback_timeout: Duration::from_millis(1000),
        }
    }
}

/// Simple circuit-breaker configuration (kept for backward compatibility).
#[derive(Debug, Clone)]
pub struct CircuitBreakerConfig {
    /// Master switch; when `false`, calls pass straight through.
    pub enabled: bool,
    /// Trip after this many consecutive counted failures.
    pub failure_threshold: u32,
    /// Wait time before an Open breaker may move to HalfOpen.
    pub recovery_timeout: Duration,
    /// Successes needed in HalfOpen to close the breaker again.
    pub half_open_success_threshold: u32,
}

impl Default for CircuitBreakerConfig {
    fn default() -> Self {
        Self {
            enabled: true,
            failure_threshold: 5,
            recovery_timeout: Duration::from_secs(30),
            half_open_success_threshold: 2,
        }
    }
}

impl From<CircuitBreakerConfig> for AdvancedCircuitBreakerConfig {
    fn from(config: CircuitBreakerConfig) -> Self {
        Self {
            enabled: config.enabled,
            strategy: CircuitBreakerStrategy::FailureCount,
            failure_threshold: config.failure_threshold,
            recovery_timeout: config.recovery_timeout,
            half_open_success_threshold: config.half_open_success_threshold,
            ..Default::default()
        }
    }
}

/// Simple circuit breaker implementation (backward-compatible version).
pub struct DefaultCircuitBreaker {
    /// Per-service state machines, keyed by service name.
    states: Arc<RwLock<HashMap<String, StateMachine>>>,
    /// Shared, immutable configuration.
    config: Arc<CircuitBreakerConfig>,
}

impl DefaultCircuitBreaker {
    pub fn new(config: CircuitBreakerConfig) -> Self {
        Self {
            states: Arc::new(RwLock::new(HashMap::new())),
            config: Arc::new(config),
        }
    }
}

/// Advanced circuit breaker: strategy-based tripping, exception filtering,
/// and progressive (percentage-based) recovery.
pub struct AdvancedCircuitBreaker {
    /// Per-service state machines, keyed by service name.
    states: Arc<RwLock<HashMap<String, StateMachine>>>,
    /// Shared, immutable configuration.
    config: Arc<AdvancedCircuitBreakerConfig>,
    /// Per-service in-flight request counters.
    concurrent_requests: Arc<RwLock<HashMap<String, u32>>>,
    /// Per-service request statistics.
    request_stats: Arc<RwLock<HashMap<String, RequestStats>>>,
}

/// Per-service request statistics (cumulative since creation or last reset).
#[derive(Debug, Clone)]
struct RequestStats {
    /// Total counted requests.
    total_requests: u64,
    /// Counted successful requests.
    successful_requests: u64,
    /// Counted failed requests.
    failed_requests: u64,
    /// Requests slower than `response_time_threshold`.
    slow_requests: u64,
    /// Timestamp of the most recent counted request.
    last_request_time: Option<SystemTime>,
    /// Exponentially weighted moving average of response time.
    average_response_time: Duration,
    /// Progressive-recovery traffic percentage currently admitted.
    recovery_percentage: f64,
}

impl Default for RequestStats {
    fn default() -> Self {
        Self {
            total_requests: 0,
            successful_requests: 0,
            failed_requests: 0,
            slow_requests: 0,
            last_request_time: None,
            average_response_time: Duration::from_millis(0),
            recovery_percentage: 100.0,
        }
    }
}

impl AdvancedCircuitBreaker {
    /// Creates a new advanced circuit breaker with empty per-service tables.
    pub fn new(config: AdvancedCircuitBreakerConfig) -> Self {
        Self {
            states: Arc::new(RwLock::new(HashMap::new())),
            config: Arc::new(config),
            concurrent_requests: Arc::new(RwLock::new(HashMap::new())),
            request_stats: Arc::new(RwLock::new(HashMap::new())),
        }
    }
    
    /// Creates an advanced breaker from the simple (legacy) configuration.
    pub fn from_simple_config(config: CircuitBreakerConfig) -> Self {
        Self::new(config.into())
    }
    
    /// Decides whether the breaker should trip for `service_name`, according
    /// to the configured strategy. Returns `false` when no statistics exist.
    ///
    /// NOTE(review): the counters in `RequestStats` are cumulative, not
    /// windowed — once `failed_requests` crosses a threshold the condition
    /// stays true until `reset()` is called. Confirm this is intended.
    async fn should_trip(&self, service_name: &str) -> bool {
        let stats = self.request_stats.read().await;
        let concurrent = self.concurrent_requests.read().await;
        
        if let Some(request_stats) = stats.get(service_name) {
            match self.config.strategy {
                CircuitBreakerStrategy::FailureCount => {
                    // Based on the cumulative failed-request count (not the
                    // state machine's own failure counter).
                    request_stats.failed_requests >= self.config.failure_threshold as u64
                }
                CircuitBreakerStrategy::FailureRate => {
                    if request_stats.total_requests < self.config.min_request_threshold as u64 {
                        return false;
                    }
                    let failure_rate = request_stats.failed_requests as f64 / request_stats.total_requests as f64;
                    failure_rate >= self.config.failure_rate_threshold
                }
                CircuitBreakerStrategy::ResponseTime => {
                    request_stats.slow_requests >= self.config.slow_request_threshold as u64
                }
                CircuitBreakerStrategy::ConcurrentRequests => {
                    if let Some(concurrent_count) = concurrent.get(service_name) {
                        *concurrent_count >= self.config.max_concurrent_requests
                    } else {
                        false
                    }
                }
                CircuitBreakerStrategy::Combined => {
                    // Combined strategy: trip if ANY single condition is met.
                    let failure_count_exceeded = request_stats.failed_requests >= self.config.failure_threshold as u64;
                    
                    let failure_rate_exceeded = if request_stats.total_requests >= self.config.min_request_threshold as u64 {
                        let failure_rate = request_stats.failed_requests as f64 / request_stats.total_requests as f64;
                        failure_rate >= self.config.failure_rate_threshold
                    } else {
                        false
                    };
                    
                    let slow_requests_exceeded = request_stats.slow_requests >= self.config.slow_request_threshold as u64;
                    
                    failure_count_exceeded || failure_rate_exceeded || slow_requests_exceeded
                }
            }
        } else {
            false
        }
    }
    
    /// Increments the in-flight request counter for `service_name`.
    async fn increment_concurrent_requests(&self, service_name: &str) {
        let mut concurrent = self.concurrent_requests.write().await;
        *concurrent.entry(service_name.to_string()).or_insert(0) += 1;
    }
    
    /// Decrements the in-flight request counter (saturating at zero).
    async fn decrement_concurrent_requests(&self, service_name: &str) {
        let mut concurrent = self.concurrent_requests.write().await;
        if let Some(count) = concurrent.get_mut(service_name) {
            *count = count.saturating_sub(1);
        }
    }
    
    /// Records one completed request: totals, success/failure counts,
    /// slow-request count, timestamp, and the EWMA response time.
    async fn update_request_stats(&self, service_name: &str, response_time: Duration, success: bool) {
        let mut stats = self.request_stats.write().await;
        let request_stats = stats.entry(service_name.to_string()).or_insert_with(RequestStats::default);
        
        request_stats.total_requests += 1;
        request_stats.last_request_time = Some(SystemTime::now());
        
        // Exponentially weighted moving average of the response time.
        let weight = 0.1; // 10% weight for the newest observation
        request_stats.average_response_time = Duration::from_millis(
            ((1.0 - weight) * request_stats.average_response_time.as_millis() as f64 +
             weight * response_time.as_millis() as f64) as u64
        );
        
        if success {
            request_stats.successful_requests += 1;
        } else {
            request_stats.failed_requests += 1;
        }
        
        // Count the request as "slow" if it exceeded the threshold.
        if response_time > self.config.response_time_threshold {
            request_stats.slow_requests += 1;
        }
    }
    
    /// Admission check: decides whether a request may proceed, taking the
    /// breaker state and (in HalfOpen) progressive recovery into account.
    async fn should_allow_request(&self, service_name: &str) -> bool {
        // Snapshot the state first; avoid holding the lock while calling
        // other async methods below.
        let (current_state, half_open_calls) = {
            let states = self.states.read().await;
            if let Some(state) = states.get(service_name) {
                (state.state(), state.half_open_calls())
            } else {
                return true; // unknown service: allow the request
            }
        };
        
        match current_state {
            CircuitBreakerState::Closed => true,
            CircuitBreakerState::Open => {
                // Allow once the recovery timeout has elapsed.
                // NOTE(review): unlike DefaultCircuitBreaker, this never calls
                // transition_to_half_open_if_ready(), so the stored state may
                // remain Open; presumably can_attempt() gates on the recovery
                // timeout internally — confirm StateMachine's semantics.
                let states = self.states.read().await;
                if let Some(state) = states.get(service_name) {
                    state.can_attempt()
                } else {
                    true
                }
            }
            CircuitBreakerState::HalfOpen => {
                if self.config.progressive_recovery.enabled {
                    // Progressive recovery: admit a percentage of requests.
                    let recovery_percentage = {
                        let stats = self.request_stats.read().await;
                        stats.get(service_name)
                            .map(|request_stats| request_stats.recovery_percentage)
                            .unwrap_or(self.config.progressive_recovery.initial_recovery_percentage)
                    };
                    
                    use std::collections::hash_map::DefaultHasher;
                    use std::hash::{Hash, Hasher};
                    
                    // Pseudo-random admission: hash the service name with the
                    // current time to get a value in [0, 100).
                    let mut hasher = DefaultHasher::new();
                    service_name.hash(&mut hasher);
                    SystemTime::now().duration_since(std::time::UNIX_EPOCH).unwrap().as_nanos().hash(&mut hasher);
                    let random_value = (hasher.finish() % 100) as f64;
                    
                    random_value < recovery_percentage
                } else {
                    // Classic half-open mode: cap the number of probe calls.
                    half_open_calls < self.config.half_open_max_calls
                }
            }
        }
    }
    
    /// Advances (on success) or resets (on failure) the progressive-recovery
    /// traffic percentage. No-op when progressive recovery is disabled.
    async fn update_progressive_recovery(&self, service_name: &str, success: bool) {
        if !self.config.progressive_recovery.enabled {
            return;
        }
        
        let mut stats = self.request_stats.write().await;
        if let Some(request_stats) = stats.get_mut(service_name) {
            if success {
                // Success: grow the admitted percentage, capped at the max.
                request_stats.recovery_percentage = (request_stats.recovery_percentage + self.config.progressive_recovery.success_increment_percentage)
                    .min(self.config.progressive_recovery.max_recovery_percentage);
            } else if self.config.progressive_recovery.reset_on_failure {
                // Failure: fall back to the initial percentage.
                request_stats.recovery_percentage = self.config.progressive_recovery.initial_recovery_percentage;
            }
        }
    }
}

#[async_trait]
impl CircuitBreaker for DefaultCircuitBreaker {
    /// Executes `func` under breaker protection. Only network errors are
    /// recorded as failures; other errors pass through without affecting
    /// the breaker state.
    async fn call(
        &self,
        service_name: &str,
        func: Box<dyn FnOnce() -> std::pin::Pin<Box<dyn std::future::Future<Output = ServiceResult<Response>> + Send>> + Send>,
    ) -> ServiceResult<Response> {
        if !self.config.enabled {
            return func().await;
        }

        // Take the write lock briefly: advance Open -> HalfOpen when the
        // recovery timeout elapsed, then check admission.
        let can_attempt = {
            let mut states_map = self.states.write().await;
            let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);
            state.transition_to_half_open_if_ready();
            state.can_attempt()
        };

        if !can_attempt {
            return Err(rustcloud_core::ServiceError::CircuitBreakerOpen(
                format!("Circuit breaker is open for service: {}", service_name)
            ));
        }

        // The lock is NOT held across the await.
        let result = func().await;

        let mut states_map = self.states.write().await;
        // The entry is guaranteed to exist: it was inserted above.
        let state = states_map.get_mut(service_name).unwrap();

        match &result {
            Ok(_) => state.record_success(self.config.half_open_success_threshold),
            Err(e) if e.is_network_error() => {
                state.record_failure(self.config.failure_threshold, self.config.recovery_timeout);
            }
            _ => { /* Do not record failure for non-network errors */ }
        }

        result
    }
    
    /// Like `call`, but returns `fallback()` when the breaker is open or the
    /// operation fails with a network error. Non-network errors are returned
    /// as-is without triggering the fallback.
    async fn call_with_fallback<T, F, Fut, FB>(
        &self,
        service_name: &str,
        operation: F,
        fallback: FB,
    ) -> ServiceResult<T>
    where
        T: Send + 'static,
        F: FnOnce() -> Fut + Send,
        Fut: std::future::Future<Output = ServiceResult<T>> + Send,
        FB: FnOnce() -> T + Send,
    {
        if !self.config.enabled {
            return operation().await;
        }

        let can_attempt = {
            let mut states_map = self.states.write().await;
            let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);
            state.transition_to_half_open_if_ready();
            state.can_attempt()
        };

        if !can_attempt {
            // Breaker is open: short-circuit straight to the fallback value.
            return Ok(fallback());
        }

        let result = operation().await;

        let mut states_map = self.states.write().await;
        let state = states_map.get_mut(service_name).unwrap();

        match &result {
            Ok(_) => {
                state.record_success(self.config.half_open_success_threshold);
                result
            }
            Err(e) if e.is_network_error() => {
                state.record_failure(self.config.failure_threshold, self.config.recovery_timeout);
                // Network failure: degrade gracefully to the fallback value.
                Ok(fallback())
            }
            Err(_) => {
                // Non-network error: propagate unchanged.
                result
            }
        }
    }

    /// Returns the current breaker state (Closed for unknown services).
    async fn state(&self, service_name: &str) -> CircuitBreakerState {
        let states_map = self.states.read().await;
        states_map.get(service_name)
            .map(|state| state.state())
            .unwrap_or(CircuitBreakerState::Closed)
    }

    /// Returns state-machine metrics (fresh defaults for unknown services).
    async fn metrics(&self, service_name: &str) -> CircuitBreakerMetrics {
        let states_map = self.states.read().await;
        states_map.get(service_name)
            .map(|state| state.metrics())
            .unwrap_or_else(|| StateMachine::new().metrics())
    }
    
    /// Returns call statistics derived from state-machine metrics.
    async fn get_call_stats(&self, service_name: &str) -> ServiceResult<CallStats> {
        let states_map = self.states.read().await;
        if let Some(state) = states_map.get(service_name) {
            let metrics = state.metrics();
            Ok(CallStats {
                total_calls: metrics.total_calls,
                successful_calls: metrics.success_calls,
                failed_calls: metrics.failed_calls,
                average_response_time: Duration::from_millis(0), // simple impl does not track timing
                last_call_time: metrics.last_failure_time,
            })
        } else {
            // Unknown service: report zeroed stats instead of an error.
            Ok(CallStats {
                total_calls: 0,
                successful_calls: 0,
                failed_calls: 0,
                average_response_time: Duration::from_millis(0),
                last_call_time: None,
            })
        }
    }
    
    /// Resets the state machine for `service_name`, if one exists.
    async fn reset(&self, service_name: &str) -> ServiceResult<()> {
        let mut states_map = self.states.write().await;
        if let Some(state) = states_map.get_mut(service_name) {
            state.reset();
        }
        Ok(())
    }
    
    /// Forces the breaker open (creating state for the service if needed).
    async fn force_open(&self, service_name: &str) -> ServiceResult<()> {
        let mut states_map = self.states.write().await;
        let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);
        state.force_open(self.config.recovery_timeout);
        Ok(())
    }
    
    /// Forces the breaker closed by resetting its state machine.
    async fn force_close(&self, service_name: &str) -> ServiceResult<()> {
        let mut states_map = self.states.write().await;
        let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);
        state.reset();
        Ok(())
    }
}

#[async_trait]
impl CircuitBreaker for AdvancedCircuitBreaker {
    /// Executes `func` under breaker protection.
    ///
    /// Flow: admission check (state machine + progressive recovery) →
    /// concurrency accounting → invoke → statistics update → state-machine
    /// update. Errors rejected by the exception filter leave the breaker
    /// state untouched.
    async fn call(
        &self,
        service_name: &str,
        func: Box<dyn FnOnce() -> std::pin::Pin<Box<dyn std::future::Future<Output = ServiceResult<Response>> + Send>> + Send>,
    ) -> ServiceResult<Response> {
        if !self.config.enabled {
            return func().await;
        }

        // Admission check: breaker state plus progressive-recovery gating.
        if !self.should_allow_request(service_name).await {
            return Err(ServiceError::CircuitBreakerOpen(
                format!("Circuit breaker is open for service: {}", service_name)
            ));
        }

        // Track in-flight requests (used by the ConcurrentRequests strategy).
        self.increment_concurrent_requests(service_name).await;

        let start_time = std::time::Instant::now();
        let result = func().await;
        let response_time = start_time.elapsed();

        self.decrement_concurrent_requests(service_name).await;

        let success = result.is_ok();
        // Only failures that pass the exception filter affect the breaker.
        let should_count = if let Err(ref error) = result {
            self.config.exception_filter.should_count_exception(error)
        } else {
            true
        };

        if should_count {
            // Update statistics first so the trip decision sees the latest
            // numbers.
            self.update_request_stats(service_name, response_time, success).await;

            // Evaluate the trip decision before taking the state lock, to
            // avoid awaiting other methods while holding it.
            let should_trip = if !success {
                self.should_trip(service_name).await
            } else {
                false
            };

            // Update the state machine.
            // NOTE(review): unlike `call_with_fallback`, a failure is recorded
            // only when the strategy says the breaker should trip — confirm
            // this asymmetry is intentional.
            {
                let mut states_map = self.states.write().await;
                let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);

                if success {
                    state.record_success(self.config.half_open_success_threshold);
                } else if should_trip {
                    state.record_failure(self.config.failure_threshold, self.config.recovery_timeout);
                }
            }

            // Grow or reset the progressive-recovery traffic percentage.
            self.update_progressive_recovery(service_name, success).await;
        }

        result
    }

    /// Like `call`, but returns `fallback()` instead of an error when the
    /// breaker is open or the operation fails (or times out).
    async fn call_with_fallback<T, F, Fut, FB>(
        &self,
        service_name: &str,
        operation: F,
        fallback: FB,
    ) -> ServiceResult<T>
    where
        T: Send + 'static,
        F: FnOnce() -> Fut + Send,
        Fut: std::future::Future<Output = ServiceResult<T>> + Send,
        FB: FnOnce() -> T + Send,
    {
        if !self.config.enabled {
            return operation().await;
        }

        if !self.should_allow_request(service_name).await {
            // Breaker is open: short-circuit straight to the fallback value.
            return Ok(fallback());
        }

        self.increment_concurrent_requests(service_name).await;

        let start_time = std::time::Instant::now();

        // Apply the timeout to the operation when configured.
        // NOTE(review): the config documents `fallback_timeout` as the timeout
        // for the *fallback* function, but it is applied to the operation here
        // — confirm the intended semantics.
        //
        // BUGFIX: a timeout is converted into a `TimeoutError` result instead
        // of being propagated with `?`. The early return previously leaked the
        // concurrent-request counter (it was never decremented), skipped the
        // statistics/state updates, and bypassed the fallback that every
        // other failure path takes.
        let result = if self.config.fallback_timeout > Duration::from_millis(0) {
            match tokio::time::timeout(self.config.fallback_timeout, operation()).await {
                Ok(inner) => inner,
                Err(_) => Err(ServiceError::TimeoutError("Operation timeout".to_string())),
            }
        } else {
            operation().await
        };

        let response_time = start_time.elapsed();

        self.decrement_concurrent_requests(service_name).await;

        let success = result.is_ok();
        // Only failures that pass the exception filter affect the breaker.
        let should_count = if let Err(ref error) = result {
            self.config.exception_filter.should_count_exception(error)
        } else {
            true
        };

        if should_count {
            self.update_request_stats(service_name, response_time, success).await;

            {
                let mut states_map = self.states.write().await;
                let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);

                if success {
                    state.record_success(self.config.half_open_success_threshold);
                } else {
                    // Always record failures here; the state machine decides
                    // for itself when to trip.
                    state.record_failure(self.config.failure_threshold, self.config.recovery_timeout);
                }
            }

            self.update_progressive_recovery(service_name, success).await;
        }

        match result {
            Ok(value) => Ok(value),
            Err(_) => {
                // Operation failed: degrade gracefully to the fallback value.
                Ok(fallback())
            }
        }
    }

    /// Returns the current breaker state (Closed for unknown services).
    async fn state(&self, service_name: &str) -> CircuitBreakerState {
        let states_map = self.states.read().await;
        states_map.get(service_name)
            .map(|state| state.state())
            .unwrap_or(CircuitBreakerState::Closed)
    }

    /// Returns state-machine metrics (fresh defaults for unknown services).
    async fn metrics(&self, service_name: &str) -> CircuitBreakerMetrics {
        let states_map = self.states.read().await;
        states_map.get(service_name)
            .map(|state| state.metrics())
            .unwrap_or_else(|| StateMachine::new().metrics())
    }

    /// Returns aggregated call statistics from the request-stats table.
    async fn get_call_stats(&self, service_name: &str) -> ServiceResult<CallStats> {
        let stats_map = self.request_stats.read().await;
        if let Some(request_stats) = stats_map.get(service_name) {
            Ok(CallStats {
                total_calls: request_stats.total_requests,
                successful_calls: request_stats.successful_requests,
                failed_calls: request_stats.failed_requests,
                average_response_time: request_stats.average_response_time,
                last_call_time: request_stats.last_request_time,
            })
        } else {
            // Unknown service: report zeroed stats instead of an error.
            Ok(CallStats {
                total_calls: 0,
                successful_calls: 0,
                failed_calls: 0,
                average_response_time: Duration::from_millis(0),
                last_call_time: None,
            })
        }
    }

    /// Resets the state machine, request statistics, and concurrency counter
    /// for `service_name`. The three locks are taken one at a time, never
    /// simultaneously.
    async fn reset(&self, service_name: &str) -> ServiceResult<()> {
        // Reset the state machine.
        {
            let mut states_map = self.states.write().await;
            if let Some(state) = states_map.get_mut(service_name) {
                state.reset();
            }
        }

        // Reset the request statistics.
        {
            let mut stats_map = self.request_stats.write().await;
            if let Some(stats) = stats_map.get_mut(service_name) {
                *stats = RequestStats::default();
            }
        }

        // Clear the in-flight counter.
        {
            let mut concurrent_map = self.concurrent_requests.write().await;
            concurrent_map.remove(service_name);
        }

        Ok(())
    }

    /// Forces the breaker open (creating state for the service if needed).
    async fn force_open(&self, service_name: &str) -> ServiceResult<()> {
        let mut states_map = self.states.write().await;
        let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);
        state.force_open(self.config.recovery_timeout);
        Ok(())
    }

    /// Forces the breaker closed by resetting its state machine.
    async fn force_close(&self, service_name: &str) -> ServiceResult<()> {
        let mut states_map = self.states.write().await;
        let state = states_map.entry(service_name.to_string()).or_insert_with(StateMachine::new);
        state.reset();
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rustcloud_core::ServiceError;
    use std::time::Duration;

    #[tokio::test]
    async fn test_circuit_breaker_opens_after_threshold() {
        let config = CircuitBreakerConfig {
            failure_threshold: 2,
            recovery_timeout: Duration::from_millis(100),
            ..Default::default()
        };
        let breaker = DefaultCircuitBreaker::new(config);
        let service_name = "test-service";

        // First failure
        let _ = breaker.call(service_name, Box::new(|| Box::pin(async { Err(ServiceError::NetworkError("err".into())) as ServiceResult<Response> }))).await;
        assert_eq!(breaker.state(service_name).await, CircuitBreakerState::Closed);

        // Second failure, should trip the breaker
        let _ = breaker.call(service_name, Box::new(|| Box::pin(async { Err(ServiceError::NetworkError("err".into())) as ServiceResult<Response> }))).await;
        assert_eq!(breaker.state(service_name).await, CircuitBreakerState::Open);
    }

    #[tokio::test]
    async fn test_circuit_breaker_allows_call_when_open_after_timeout() {
        let config = CircuitBreakerConfig {
            failure_threshold: 1,
            recovery_timeout: Duration::from_millis(50),
            ..Default::default()
        };
        let breaker = DefaultCircuitBreaker::new(config);
        let service_name = "test-service";

        // Trip the breaker
        let _ = breaker.call(service_name, Box::new(|| Box::pin(async { Err(ServiceError::NetworkError("err".into())) as ServiceResult<Response> }))).await;
        assert_eq!(breaker.state(service_name).await, CircuitBreakerState::Open);

        // Wait for recovery timeout
        tokio::time::sleep(Duration::from_millis(60)).await;

        // Breaker should now be in HalfOpen state and allow a call
        let result = breaker.call(service_name, Box::new(|| Box::pin(async { Ok(Response::new(200)) }))).await;
        assert!(result.is_ok());
        assert_eq!(breaker.state(service_name).await, CircuitBreakerState::HalfOpen);
    }

    #[tokio::test]
    async fn test_circuit_breaker_closes_after_half_open_success() {
        let config = CircuitBreakerConfig {
            failure_threshold: 1,
            recovery_timeout: Duration::from_millis(50),
            half_open_success_threshold: 1,
            ..Default::default()
        };
        let breaker = DefaultCircuitBreaker::new(config);
        let service_name = "test-service";

        // Trip the breaker and wait for half-open
        let _ = breaker.call(service_name, Box::new(|| Box::pin(async { Err(ServiceError::NetworkError("err".into())) as ServiceResult<Response> }))).await;
        tokio::time::sleep(Duration::from_millis(60)).await;

        // First call in half-open should succeed
        let _ = breaker.call(service_name, Box::new(|| Box::pin(async { Ok(Response::new(200)) }))).await;
        // After one success, it should close
        assert_eq!(breaker.state(service_name).await, CircuitBreakerState::Closed);
    }
    
    #[tokio::test]
    async fn test_advanced_circuit_breaker_progressive_recovery() {
        let mut config = AdvancedCircuitBreakerConfig::default();
        config.strategy = CircuitBreakerStrategy::FailureCount;
        config.failure_threshold = 2;
        config.recovery_timeout = Duration::from_millis(50);
        config.progressive_recovery.enabled = true;
        config.progressive_recovery.initial_recovery_percentage = 50.0;
        
        let breaker = AdvancedCircuitBreaker::new(config);
        
        // Simply verify the breaker was created successfully (new services start Closed)
        assert!(matches!(breaker.state("test-service").await, CircuitBreakerState::Closed));
    }
    
    #[tokio::test]
    async fn test_advanced_circuit_breaker_exception_filter() {
        let mut config = AdvancedCircuitBreakerConfig::default();
        config.strategy = CircuitBreakerStrategy::FailureCount;
        config.failure_threshold = 5; // high threshold so the breaker never trips here
        
        // Count network errors only
        config.exception_filter.counted_exceptions.clear();
        config.exception_filter.counted_exceptions.insert("NetworkError".to_string());
        config.exception_filter.count_all_by_default = false;
        
        let breaker = AdvancedCircuitBreaker::new(config);
        
        // Verify exception-filter behavior
        let network_error = ServiceError::NetworkError("test".to_string());
        let validation_error = ServiceError::ValidationError(rustcloud_core::ValidationError::InvalidServiceId("test".to_string()));
        
        assert!(breaker.config.exception_filter.should_count_exception(&network_error));
        assert!(!breaker.config.exception_filter.should_count_exception(&validation_error));
    }
    
    #[tokio::test]
    async fn test_advanced_circuit_breaker_call_with_fallback() {
        let mut config = AdvancedCircuitBreakerConfig::default();
        config.strategy = CircuitBreakerStrategy::FailureCount; // use the failure-count strategy explicitly
        config.failure_threshold = 2;
        config.recovery_timeout = Duration::from_millis(50);
        config.fallback_enabled = true;
        config.fallback_timeout = Duration::from_millis(100);
        
        let breaker = AdvancedCircuitBreaker::new(config);
        
        // Normal (successful) call
        let result = breaker.call_with_fallback(
            "test-service",
            || async { Ok(42) },
            || 999, // fallback value
        ).await;
        
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), 42);
        
        // Simulate failures until the breaker opens
        for _ in 0..2 {
            let _ = breaker.call_with_fallback(
                "test-service",
                || async { Err::<i32, _>(ServiceError::NetworkError("Connection failed".to_string())) },
                || 999,
            ).await;
        }
        
        // The breaker should now be open, so the fallback value is returned
        let result = breaker.call_with_fallback(
            "test-service", 
            || async { Ok(42) }, // must not be invoked
            || 999, // this value should be returned instead
        ).await;
        
        assert!(result.is_ok());
        assert_eq!(result.unwrap(), 999); // fallback value expected
        
        // Verify the breaker state
        assert_eq!(breaker.state("test-service").await, CircuitBreakerState::Open);
    }

    #[tokio::test]
    async fn test_circuit_breaker_reopens_after_half_open_failure() {
        let config = CircuitBreakerConfig {
            failure_threshold: 1,
            recovery_timeout: Duration::from_millis(50),
            ..Default::default()
        };
        let breaker = DefaultCircuitBreaker::new(config);
        let service_name = "test-service";

        // Trip the breaker and wait for half-open
        let _ = breaker.call(service_name, Box::new(|| Box::pin(async { Err(ServiceError::NetworkError("err".into())) as ServiceResult<Response> }))).await;
        tokio::time::sleep(Duration::from_millis(60)).await;

        // First call in half-open fails, should re-open the circuit
        let _ = breaker.call(service_name, Box::new(|| Box::pin(async { Err(ServiceError::NetworkError("err".into())) as ServiceResult<Response> }))).await;
        assert_eq!(breaker.state(service_name).await, CircuitBreakerState::Open);
    }
}