use async_trait::async_trait;
use rustcloud_core::{ServiceError, ServiceResult, ServiceInstance, RetryCondition};
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use tokio::sync::RwLock;
use serde::{Deserialize, Serialize};

/// Health classification for a single service instance.
#[derive(Debug, Clone, PartialEq)]
pub enum InstanceHealth {
    /// Operating normally.
    Healthy,
    /// Elevated failure rate, but still usable.
    Degraded { failure_rate: f64 },
    /// Failure rate above the unhealthy threshold.
    Unhealthy,
    /// Circuit breaker tripped; the instance should not receive traffic.
    CircuitOpen,
}

/// Per-instance request statistics and the health state derived from them.
#[derive(Debug, Clone)]
pub struct InstanceStats {
    pub total_requests: u64,
    pub successful_requests: u64,
    pub failed_requests: u64,
    pub last_request_time: Option<SystemTime>,
    pub average_response_time: Duration,
    pub consecutive_failures: u32,
    pub health_status: InstanceHealth,
    pub last_health_check: SystemTime,
}

impl Default for InstanceStats {
    fn default() -> Self {
        Self {
            total_requests: 0,
            successful_requests: 0,
            failed_requests: 0,
            last_request_time: None,
            average_response_time: Duration::ZERO,
            consecutive_failures: 0,
            health_status: InstanceHealth::Healthy,
            last_health_check: SystemTime::now(),
        }
    }
}

impl InstanceStats {
    /// Record the outcome of one request and refresh the derived health state.
    pub fn update(&mut self, success: bool, response_time: Duration) {
        self.total_requests += 1;
        self.last_request_time = Some(SystemTime::now());

        // Exponentially weighted moving average of the response time
        // (alpha = 0.1; millisecond resolution, truncating).
        const ALPHA: f64 = 0.1;
        self.average_response_time = if self.total_requests == 1 {
            response_time
        } else {
            let prev = self.average_response_time.as_millis() as f64;
            let sample = response_time.as_millis() as f64;
            Duration::from_millis((prev * (1.0 - ALPHA) + sample * ALPHA) as u64)
        };

        match success {
            true => {
                self.successful_requests += 1;
                self.consecutive_failures = 0;
            }
            false => {
                self.failed_requests += 1;
                self.consecutive_failures += 1;
            }
        }

        self.update_health_status();
    }

    /// Re-derive `health_status` from the accumulated counters.
    fn update_health_status(&mut self) {
        // With fewer than 10 samples the failure rate is too noisy;
        // keep reporting the instance as healthy.
        if self.total_requests < 10 {
            self.health_status = InstanceHealth::Healthy;
            return;
        }

        let failure_rate = self.failed_requests as f64 / self.total_requests as f64;

        // A run of consecutive failures trips the breaker first; otherwise
        // classify by the lifetime failure rate.
        self.health_status = if self.consecutive_failures >= 5 {
            InstanceHealth::CircuitOpen
        } else if failure_rate > 0.5 {
            InstanceHealth::Unhealthy
        } else if failure_rate > 0.1 {
            InstanceHealth::Degraded { failure_rate }
        } else {
            InstanceHealth::Healthy
        };

        self.last_health_check = SystemTime::now();
    }

    /// Health score in the range 0-100 (higher is better).
    pub fn health_score(&self) -> u8 {
        match &self.health_status {
            InstanceHealth::Healthy => 100,
            InstanceHealth::Degraded { failure_rate } => ((1.0 - failure_rate) * 80.0) as u8,
            InstanceHealth::Unhealthy => 20,
            InstanceHealth::CircuitOpen => 0,
        }
    }

    /// An instance is routable unless its circuit breaker is open.
    pub fn is_available(&self) -> bool {
        !matches!(self.health_status, InstanceHealth::CircuitOpen)
    }
}

/// Failover strategy used when selecting a replacement instance for a retry.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum FailoverStrategy {
    /// Fast failover — immediately switch to the next available instance.
    FastFailover,
    /// Gradual degradation — steer traffic toward instances with better health scores.
    GradualDegradation,
    /// Sticky session — try to preserve session affinity.
    StickySession { session_timeout: Duration },
    /// Zone aware — optionally prefer instances in the same zone as the failed one.
    ZoneAware { prefer_same_zone: bool },
}

/// Outcome of a retry decision: whether to retry, how long to wait,
/// and which instance to target next.
#[derive(Debug, Clone)]
pub struct RetryDecision {
    /// True when the caller should attempt the request again.
    pub should_retry: bool,
    /// Back-off delay to observe before the next attempt.
    pub delay: Duration,
    /// Replacement instance to use, if one was selected.
    pub next_instance: Option<ServiceInstance>,
    /// Human-readable explanation of the decision.
    pub reason: String,
}

/// Intelligent retry manager: tracks per-instance health statistics and
/// combines them with a failover strategy to decide whether, when, and
/// against which instance a failed request should be retried.
pub struct IntelligentRetryManager {
    /// Per-instance statistics keyed by "host:port".
    instance_stats: Arc<RwLock<HashMap<String, InstanceStats>>>,
    /// Strategy for choosing a replacement instance on retry.
    failover_strategy: FailoverStrategy,
    /// Error conditions that are considered retryable.
    retry_conditions: Vec<RetryCondition>,
    /// Maximum number of retry attempts.
    max_retries: u32,
    /// Base delay for exponential backoff.
    base_delay: Duration,
    /// Upper bound on the backoff delay (before health penalties).
    max_delay: Duration,
}

impl IntelligentRetryManager {
    /// Build a manager with the given failover strategy, retryable error
    /// conditions, attempt limit, and exponential-backoff bounds.
    pub fn new(
        failover_strategy: FailoverStrategy,
        retry_conditions: Vec<RetryCondition>,
        max_retries: u32,
        base_delay: Duration,
        max_delay: Duration,
    ) -> Self {
        Self {
            instance_stats: Arc::new(RwLock::new(HashMap::new())),
            failover_strategy,
            retry_conditions,
            max_retries,
            base_delay,
            max_delay,
        }
    }

    /// Key used to index per-instance statistics ("host:port").
    fn instance_key(instance: &ServiceInstance) -> String {
        format!("{}:{}", instance.host, instance.port)
    }

    /// Record the result of one request so future routing decisions can
    /// take this instance's health into account.
    pub async fn record_result(
        &self,
        instance: &ServiceInstance,
        success: bool,
        response_time: Duration,
    ) {
        let instance_key = Self::instance_key(instance);
        let mut stats_map = self.instance_stats.write().await;
        let stats = stats_map.entry(instance_key).or_default();
        stats.update(success, response_time);
    }

    /// Decide whether a failed request should be retried, and if so,
    /// with which instance and after what delay.
    ///
    /// * `error` — the error produced by the failed attempt.
    /// * `attempt` — 1-based number of the attempt that just failed.
    /// * `available_instances` — candidate instances for the retry.
    /// * `current_instance` — the instance that just failed.
    pub async fn should_retry(
        &self,
        error: &ServiceError,
        attempt: u32,
        available_instances: &[ServiceInstance],
        current_instance: &ServiceInstance,
    ) -> RetryDecision {
        // Hard stop once the attempt budget is exhausted.
        if attempt >= self.max_retries {
            return RetryDecision {
                should_retry: false,
                delay: Duration::ZERO,
                next_instance: None,
                reason: "Max retries exceeded".to_string(),
            };
        }

        // Only retry errors that match one of the configured retry conditions.
        let should_retry_error = self.retry_conditions.iter().any(|condition| {
            matches!(condition, RetryCondition::NetworkError) && error.is_network_error() ||
            matches!(condition, RetryCondition::Timeout) && error.is_timeout_error()
        });

        if !should_retry_error {
            return RetryDecision {
                should_retry: false,
                delay: Duration::ZERO,
                next_instance: None,
                reason: "Error not retryable".to_string(),
            };
        }

        // Pick the next instance according to the failover strategy.
        let next_instance = self.select_next_instance(
            available_instances,
            current_instance,
            attempt,
        ).await;

        match next_instance {
            Some(instance) => {
                let delay = self.calculate_retry_delay(attempt, &instance).await;
                let reason = format!("Retrying with instance {}:{}", instance.host, instance.port);
                RetryDecision {
                    should_retry: true,
                    delay,
                    next_instance: Some(instance),
                    reason,
                }
            }
            None => RetryDecision {
                should_retry: false,
                delay: Duration::ZERO,
                next_instance: None,
                reason: "No available instances for retry".to_string(),
            },
        }
    }

    /// Choose the next instance for a retry according to the configured
    /// failover strategy, skipping instances whose circuit breaker is open.
    async fn select_next_instance(
        &self,
        available_instances: &[ServiceInstance],
        current_instance: &ServiceInstance,
        attempt: u32,
    ) -> Option<ServiceInstance> {
        let stats_map = self.instance_stats.read().await;

        let mut candidates: Vec<_> = available_instances
            .iter()
            .filter(|instance| {
                // On the first retry, exclude the instance that just failed;
                // later retries may revisit it.
                if attempt == 1 {
                    instance.host != current_instance.host || instance.port != current_instance.port
                } else {
                    true
                }
            })
            .filter(|instance| {
                // Keep only instances that are not circuit-open. Instances
                // with no recorded stats are assumed available.
                stats_map
                    .get(&Self::instance_key(instance))
                    .map(|stats| stats.is_available())
                    .unwrap_or(true)
            })
            .collect();

        if candidates.is_empty() {
            return None;
        }

        match &self.failover_strategy {
            FailoverStrategy::FastFailover => {
                // Take the first available instance.
                candidates.first().map(|&instance| instance.clone())
            }
            FailoverStrategy::GradualDegradation => {
                // Prefer the instance with the best health score.
                candidates.sort_by_key(|instance| {
                    let health_score = stats_map
                        .get(&Self::instance_key(instance))
                        .map(|stats| stats.health_score())
                        .unwrap_or(100);
                    255 - health_score as u16 // best-scoring instance sorts first
                });
                candidates.first().map(|&instance| instance.clone())
            }
            FailoverStrategy::StickySession { .. } => {
                // TODO: implement session-affinity selection.
                candidates.first().map(|&instance| instance.clone())
            }
            FailoverStrategy::ZoneAware { prefer_same_zone } => {
                if *prefer_same_zone {
                    // Prefer an instance in the same zone as the failed one.
                    // NOTE(review): when neither side declares a "zone" in its
                    // metadata, both lookups are None and they count as the
                    // same zone — confirm this is the intended semantics.
                    let current_zone = current_instance.metadata.get("zone");
                    let same_zone = candidates
                        .iter()
                        .find(|instance| instance.metadata.get("zone") == current_zone);

                    match same_zone {
                        Some(&instance) => Some(instance.clone()),
                        None => candidates.first().map(|&instance| instance.clone()),
                    }
                } else {
                    candidates.first().map(|&instance| instance.clone())
                }
            }
        }
    }

    /// Compute the back-off delay before retrying against `instance`:
    /// exponential in the attempt number (capped at `max_delay`), plus a
    /// penalty that grows with how unhealthy the target instance is.
    async fn calculate_retry_delay(&self, attempt: u32, instance: &ServiceInstance) -> Duration {
        let stats_map = self.instance_stats.read().await;

        // Exponential backoff. The exponent is capped at 31 and the multiply
        // saturates, so large attempt numbers cannot overflow and panic
        // (the original `base_delay * 2_u32.pow(attempt - 1)` panicked for
        // attempt >= 33, and on Duration overflow).
        let exponent = attempt.saturating_sub(1).min(31);
        let base_delay = self.base_delay.saturating_mul(1u32 << exponent);
        let capped_delay = std::cmp::min(base_delay, self.max_delay);

        // Penalize less-healthy instances with additional delay.
        match stats_map.get(&Self::instance_key(instance)) {
            Some(stats) => match &stats.health_status {
                InstanceHealth::Healthy => capped_delay,
                InstanceHealth::Degraded { .. } => capped_delay + Duration::from_millis(100),
                InstanceHealth::Unhealthy => capped_delay + Duration::from_millis(500),
                InstanceHealth::CircuitOpen => capped_delay + Duration::from_secs(1),
            },
            None => capped_delay,
        }
    }

    /// Snapshot of the statistics for a single instance, if any were recorded.
    pub async fn get_instance_stats(&self, instance: &ServiceInstance) -> Option<InstanceStats> {
        let stats_map = self.instance_stats.read().await;
        stats_map.get(&Self::instance_key(instance)).cloned()
    }

    /// Snapshot of the statistics for every tracked instance.
    pub async fn get_all_stats(&self) -> HashMap<String, InstanceStats> {
        self.instance_stats.read().await.clone()
    }

    /// Forget all statistics recorded for `instance`.
    pub async fn reset_instance_stats(&self, instance: &ServiceInstance) {
        let mut stats_map = self.instance_stats.write().await;
        stats_map.remove(&Self::instance_key(instance));
    }

    /// Drop statistics entries whose last request is older than `max_age`.
    /// Entries with no recorded request time are also removed.
    pub async fn cleanup_expired_stats(&self, max_age: Duration) {
        let mut stats_map = self.instance_stats.write().await;
        let now = SystemTime::now();

        stats_map.retain(|_, stats| {
            stats.last_request_time
                .and_then(|time| now.duration_since(time).ok())
                .map(|age| age < max_age)
                .unwrap_or(false)
        });
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rustcloud_core::ServiceInstance;

    #[test]
    fn test_instance_stats_update() {
        let mut stats = InstanceStats::default();

        // Successful request increments totals and resets the failure streak.
        stats.update(true, Duration::from_millis(100));
        assert_eq!(stats.total_requests, 1);
        assert_eq!(stats.successful_requests, 1);
        assert_eq!(stats.consecutive_failures, 0);

        // Failed request increments the failure counters.
        stats.update(false, Duration::from_millis(200));
        assert_eq!(stats.total_requests, 2);
        assert_eq!(stats.failed_requests, 1);
        assert_eq!(stats.consecutive_failures, 1);
    }

    #[test]
    fn test_health_score_calculation() {
        let mut stats = InstanceStats::default();

        // Healthy: 10 straight successes yield a perfect score.
        for _ in 0..10 {
            stats.update(true, Duration::from_millis(100));
        }
        assert_eq!(stats.health_score(), 100);

        // Degraded: 4 failures (14 total, ~28.6% failure rate) stay below the
        // circuit-breaker threshold of 5 consecutive failures. The previous
        // version recorded 5 failures, which tripped CircuitOpen and drove the
        // score to 0, making the `> 50` assertion below fail.
        for _ in 0..4 {
            stats.update(false, Duration::from_millis(100));
        }
        assert!(stats.health_score() < 100);
        assert!(stats.health_score() > 50);
    }

    #[tokio::test]
    async fn test_retry_manager() {
        let manager = IntelligentRetryManager::new(
            FailoverStrategy::FastFailover,
            vec![RetryCondition::NetworkError],
            3,
            Duration::from_millis(100),
            Duration::from_secs(5),
        );

        let instance = ServiceInstance::new("test", "127.0.0.1", 8080, "http");

        // Record a failure and check it shows up in the stats snapshot.
        manager.record_result(&instance, false, Duration::from_millis(1000)).await;

        let stats = manager.get_instance_stats(&instance).await;
        assert!(stats.is_some());
        assert_eq!(stats.unwrap().failed_requests, 1);
    }
}