use async_trait::async_trait;
use crate::{IntelligentRetryManager, FailoverStrategy};
use rustcloud_core::{
    Request, Response, ServiceResult, ServiceError, ServiceInstance, ServiceRegistry,
    LoadBalancer, Transport, ServiceProxy, CallStats, ProxyConfig,
    RetryConfig, TimeoutConfig, ConnectionPoolConfig, RetryCondition, RequestContext,
    CircuitBreaker
};
use rustcloud_circuit_breaker::AdvancedCircuitBreaker;
use std::collections::HashMap;
use std::sync::Arc;
use std::time::{Duration, Instant, SystemTime};
use tokio::sync::{RwLock, Semaphore};
use tokio::time::{sleep, timeout};
use serde::{Deserialize, Serialize};

/// Per-service connection statistics tracked by the proxy.
///
/// `Default` is derived: all counters start at zero, the average response
/// time at `Duration::ZERO`, and `last_request_time` at `None` — identical
/// to the previous hand-written `impl Default`.
#[derive(Debug, Clone, Default)]
pub struct ConnectionStats {
    // Number of currently active connections.
    pub active_connections: u32,
    // Total number of requests issued to the service.
    pub total_requests: u64,
    // Requests that completed successfully.
    pub successful_requests: u64,
    // Requests that failed (including timeouts).
    pub failed_requests: u64,
    // Exponentially weighted moving average of response time.
    pub average_response_time: Duration,
    // Wall-clock time of the most recent request, if any.
    pub last_request_time: Option<SystemTime>,
}

/// Request interceptor: a hook that runs before a request is dispatched.
#[async_trait]
pub trait RequestInterceptor: Send + Sync {
    /// Called before the request is sent; may mutate the request in place.
    /// Returning an error aborts the call.
    async fn before_request(&self, request: &mut Request, context: &RequestContext) -> ServiceResult<()>;
    
    /// Returns the interceptor's name (for identification/diagnostics).
    fn name(&self) -> &str;
    
    /// Execution order: lower values run first. Defaults to 0.
    fn order(&self) -> i32 {
        0
    }
}

/// Response interceptor: hooks that run after a response arrives or an
/// error occurs.
#[async_trait]
pub trait ResponseInterceptor: Send + Sync {
    /// Called before the response is returned to the caller; may mutate the
    /// response in place. Returning an error fails the call.
    async fn after_response(&self, request: &Request, response: &mut Response, context: &RequestContext) -> ServiceResult<()>;
    
    /// Called when the request fails. Default implementation does nothing.
    async fn on_error(&self, request: &Request, error: &ServiceError, context: &RequestContext) -> ServiceResult<()> {
        // Intentionally a no-op by default.
        Ok(())
    }
    
    /// Returns the interceptor's name (for identification/diagnostics).
    fn name(&self) -> &str;
    
    /// Execution order: lower values run first. Defaults to 0.
    fn order(&self) -> i32 {
        0
    }
}

/// Strategy controlling how proxied responses are cached.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CacheStrategy {
    /// No caching.
    None,
    /// Time-based cache: entries expire after `ttl`.
    TimeBasedCache { ttl: Duration },
    /// LRU cache bounded to `max_size` entries, each expiring after `ttl`.
    LruCache { max_size: usize, ttl: Duration },
    /// Bounded cache whose keys are prefixed by a named key generator.
    KeyBasedCache { max_size: usize, ttl: Duration, key_generator: String },
}

/// Response-cache configuration for the proxy.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheConfig {
    // Master switch; when false the cache is bypassed entirely.
    pub enabled: bool,
    // Eviction/expiry strategy.
    pub strategy: CacheStrategy,
    // Whether cached payloads should be compressed.
    // NOTE(review): not consulted anywhere in this file — confirm usage.
    pub compress: bool,
    // Serialization format name, e.g. "json" or "binary".
    // NOTE(review): not consulted anywhere in this file — confirm usage.
    pub serialize_format: String,
}

impl Default for CacheConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            strategy: CacheStrategy::TimeBasedCache { ttl: Duration::from_secs(300) },
            compress: false,
            serialize_format: "json".to_string(),
        }
    }
}

/// A single cached response plus its bookkeeping metadata.
#[derive(Debug, Clone)]
struct CacheEntry {
    // The cached response payload.
    response: Response,
    // When the entry was inserted; basis for TTL expiry.
    created_at: SystemTime,
    // Time-to-live measured from `created_at`.
    ttl: Duration,
    // Number of cache hits served from this entry.
    access_count: u64,
    // Last hit time; used by the LRU eviction in `cache_response`.
    last_access: SystemTime,
}

impl CacheEntry {
    fn new(response: Response, ttl: Duration) -> Self {
        let now = SystemTime::now();
        Self {
            response,
            created_at: now,
            ttl,
            access_count: 0,
            last_access: now,
        }
    }
    
    fn is_expired(&self) -> bool {
        self.created_at.elapsed().unwrap_or(Duration::MAX) > self.ttl
    }
    
    fn access(&mut self) -> &Response {
        self.access_count += 1;
        self.last_access = SystemTime::now();
        &self.response
    }
}

/// Advanced client-side proxy combining service discovery, load balancing,
/// circuit breaking, timeouts/retries, response caching, and
/// request/response interception behind a single call interface.
pub struct AdvancedClientProxy {
    registry: Arc<dyn ServiceRegistry>,
    load_balancer: Arc<dyn LoadBalancer>,
    transport: Arc<dyn Transport>,
    circuit_breaker: Arc<AdvancedCircuitBreaker>,
    config: ProxyConfig,
    cache_config: CacheConfig,
    
    /// Request interceptors, kept sorted by `order()` (ascending).
    request_interceptors: Arc<RwLock<Vec<Arc<dyn RequestInterceptor>>>>,
    /// Response interceptors, kept sorted by `order()` (ascending).
    response_interceptors: Arc<RwLock<Vec<Arc<dyn ResponseInterceptor>>>>,
    /// Per-service connection statistics keyed by service name.
    connection_stats: Arc<RwLock<HashMap<String, ConnectionStats>>>,
    /// Bounds the number of concurrent in-flight requests.
    connection_semaphore: Arc<Semaphore>,
    /// Response cache keyed by `generate_cache_key`.
    response_cache: Arc<RwLock<HashMap<String, CacheEntry>>>,
    /// Failover/retry manager.
    /// NOTE(review): initialized in `new` but not consulted in the visible
    /// call path — confirm intended usage.
    retry_manager: Arc<IntelligentRetryManager>,
}

impl AdvancedClientProxy {
    /// Creates a proxy wired to the given discovery, load-balancing,
    /// transport and circuit-breaker components. Caching starts disabled
    /// (see [`CacheConfig::default`]); enable it via `with_cache_config`.
    pub fn new(
        registry: Arc<dyn ServiceRegistry>,
        load_balancer: Arc<dyn LoadBalancer>,
        transport: Arc<dyn Transport>,
        circuit_breaker: Arc<AdvancedCircuitBreaker>,
        config: ProxyConfig,
    ) -> Self {
        // Bound in-flight requests by the configured pool size.
        let connection_semaphore = Arc::new(Semaphore::new(config.connection_pool.max_connections));
        
        // The failover manager shares the proxy's retry policy.
        let retry_manager = Arc::new(IntelligentRetryManager::new(
            FailoverStrategy::FastFailover,
            config.retry.retry_on.clone(),
            config.retry.max_attempts,
            config.retry.base_delay,
            config.retry.max_delay,
        ));
        
        Self {
            registry,
            load_balancer,
            transport,
            circuit_breaker,
            config,
            cache_config: CacheConfig::default(),
            request_interceptors: Arc::new(RwLock::new(Vec::new())),
            response_interceptors: Arc::new(RwLock::new(Vec::new())),
            connection_stats: Arc::new(RwLock::new(HashMap::new())),
            connection_semaphore,
            response_cache: Arc::new(RwLock::new(HashMap::new())),
            retry_manager,
        }
    }
    
    /// Registers a request interceptor, keeping the list sorted by priority.
    pub async fn add_request_interceptor(&self, interceptor: Arc<dyn RequestInterceptor>) {
        let mut interceptors = self.request_interceptors.write().await;
        interceptors.push(interceptor);
        // Lower `order()` runs first.
        interceptors.sort_by_key(|i| i.order());
    }
    
    /// Registers a response interceptor, keeping the list sorted by priority.
    pub async fn add_response_interceptor(&self, interceptor: Arc<dyn ResponseInterceptor>) {
        let mut interceptors = self.response_interceptors.write().await;
        interceptors.push(interceptor);
        // Lower `order()` runs first.
        interceptors.sort_by_key(|i| i.order());
    }
    
    /// Builder-style setter for the cache configuration.
    pub fn with_cache_config(mut self, cache_config: CacheConfig) -> Self {
        self.cache_config = cache_config;
        self
    }
    
    /// Derives the cache key for a request.
    /// NOTE(review): the key ignores query parameters and request body, so
    /// distinct payloads to the same path share one cache slot — confirm
    /// this is intended for the methods being cached.
    fn generate_cache_key(&self, request: &Request) -> String {
        match &self.cache_config.strategy {
            CacheStrategy::KeyBasedCache { key_generator, .. } => {
                // Prefix with the configured key-generator identifier.
                format!("{}:{}:{}:{}", key_generator, request.service_name, request.method, request.path)
            }
            _ => {
                // Default key: service + method + path.
                format!("{}:{}:{}", request.service_name, request.method, request.path)
            }
        }
    }
    
    /// Returns a cached response if present and fresh; a stale hit is
    /// evicted on the spot. Returns `None` when caching is disabled.
    async fn get_cached_response(&self, cache_key: &str) -> Option<Response> {
        if !self.cache_config.enabled {
            return None;
        }
        
        // Write lock: a hit updates access bookkeeping and a stale hit is removed.
        let mut cache = self.response_cache.write().await;
        if let Some(entry) = cache.get_mut(cache_key) {
            if !entry.is_expired() {
                return Some(entry.access().clone());
            } else {
                cache.remove(cache_key);
            }
        }
        None
    }
    
    /// Stores a successful response in the cache. For size-bounded
    /// strategies, evicts the least recently accessed entry when full.
    async fn cache_response(&self, cache_key: String, response: &Response) {
        // Only successful responses are cacheable.
        if !self.cache_config.enabled || !response.is_success() {
            return;
        }
        
        let ttl = match &self.cache_config.strategy {
            CacheStrategy::TimeBasedCache { ttl } => *ttl,
            CacheStrategy::LruCache { ttl, .. } => *ttl,
            CacheStrategy::KeyBasedCache { ttl, .. } => *ttl,
            CacheStrategy::None => return,
        };
        
        let entry = CacheEntry::new(response.clone(), ttl);
        let mut cache = self.response_cache.write().await;
        
        // Enforce the size limit for bounded strategies.
        match &self.cache_config.strategy {
            CacheStrategy::LruCache { max_size, .. } | 
            CacheStrategy::KeyBasedCache { max_size, .. } => {
                if cache.len() >= *max_size {
                    // Evict the least recently accessed entry.
                    if let Some(oldest_key) = cache
                        .iter()
                        .min_by_key(|(_, entry)| entry.last_access)
                        .map(|(key, _)| key.clone())
                    {
                        cache.remove(&oldest_key);
                    }
                }
            }
            _ => {}
        }
        
        cache.insert(cache_key, entry);
    }
    
    /// Drops every expired cache entry. Intended to be called periodically.
    pub async fn cleanup_expired_cache(&self) {
        let mut cache = self.response_cache.write().await;
        cache.retain(|_, entry| !entry.is_expired());
    }
    
    /// Runs all request interceptors in priority order; the first error aborts.
    async fn execute_request_interceptors(&self, request: &mut Request, context: &RequestContext) -> ServiceResult<()> {
        let interceptors = self.request_interceptors.read().await;
        for interceptor in interceptors.iter() {
            interceptor.before_request(request, context).await?;
        }
        Ok(())
    }
    
    /// Runs all response interceptors in priority order; the first error aborts.
    async fn execute_response_interceptors(&self, request: &Request, response: &mut Response, context: &RequestContext) -> ServiceResult<()> {
        let interceptors = self.response_interceptors.read().await;
        for interceptor in interceptors.iter() {
            interceptor.after_response(request, response, context).await?;
        }
        Ok(())
    }
    
    /// Notifies all response interceptors of a failed call.
    async fn execute_error_interceptors(&self, request: &Request, error: &ServiceError, context: &RequestContext) -> ServiceResult<()> {
        let interceptors = self.response_interceptors.read().await;
        for interceptor in interceptors.iter() {
            interceptor.on_error(request, error, context).await?;
        }
        Ok(())
    }
    
    /// Updates per-service statistics after a call completes.
    async fn update_connection_stats(&self, service_name: &str, success: bool, response_time: Duration) {
        let mut stats_map = self.connection_stats.write().await;
        let stats = stats_map.entry(service_name.to_string()).or_default();
        
        stats.total_requests += 1;
        stats.last_request_time = Some(SystemTime::now());
        
        if success {
            stats.successful_requests += 1;
        } else {
            stats.failed_requests += 1;
        }
        
        // Exponentially weighted moving average of the response time.
        // Seed with the first observation so the average is not dragged
        // toward the zero initial value during warm-up.
        if stats.total_requests == 1 {
            stats.average_response_time = response_time;
        } else {
            let alpha = 0.1;
            let new_avg = stats.average_response_time.as_millis() as f64 * (1.0 - alpha) + 
                          response_time.as_millis() as f64 * alpha;
            stats.average_response_time = Duration::from_millis(new_avg as u64);
        }
    }
    
    /// Returns true if the error (and optional response) matches any
    /// configured retry condition.
    fn should_retry(&self, error: &ServiceError, response: Option<&Response>) -> bool {
        for condition in &self.config.retry.retry_on {
            match condition {
                RetryCondition::NetworkError => {
                    if error.is_network_error() {
                        return true;
                    }
                }
                RetryCondition::Timeout => {
                    if error.is_timeout_error() {
                        return true;
                    }
                }
                RetryCondition::HttpStatus(status) => {
                    if let Some(resp) = response {
                        if resp.status == *status {
                            return true;
                        }
                    }
                }
                RetryCondition::HttpStatusRange(start, end) => {
                    if let Some(resp) = response {
                        if resp.status >= *start && resp.status <= *end {
                            return true;
                        }
                    }
                }
            }
        }
        false
    }
    
    /// Computes the exponential-backoff delay for the given (zero-based)
    /// attempt, capped at the configured maximum.
    ///
    /// Uses checked/saturating arithmetic: the previous `2_u32.pow(attempt)`
    /// panics on overflow in debug builds once `attempt >= 32`, and the
    /// `Duration * u32` multiply can overflow as well. Saturating values are
    /// clamped by `max_delay` anyway.
    fn calculate_backoff_delay(&self, attempt: u32) -> Duration {
        let base_delay = self.config.retry.base_delay;
        let max_delay = self.config.retry.max_delay;
        
        // 2^attempt, saturating instead of panicking for large attempts.
        let factor = 2_u32.checked_pow(attempt).unwrap_or(u32::MAX);
        base_delay.saturating_mul(factor).min(max_delay)
    }
}

#[async_trait]
impl ServiceProxy for AdvancedClientProxy {
    /// Full call pipeline: interception → cache lookup → (timeout-bounded
    /// call through the circuit breaker) with exponential-backoff retries,
    /// statistics, and cache population on success.
    async fn call_with_retry(&self, mut request: Request) -> ServiceResult<Response> {
        let context = RequestContext {
            session_id: None,
            user_id: None,
            trace_id: request.headers.get("trace-id").cloned(),
            headers: request.headers.clone(),
            remote_addr: None,
        };
        
        // Run request interceptors (they may mutate the request).
        self.execute_request_interceptors(&mut request, &context).await?;
        
        // Serve from cache when a fresh entry exists.
        let cache_key = self.generate_cache_key(&request);
        if let Some(cached_response) = self.get_cached_response(&cache_key).await {
            return Ok(cached_response);
        }
        
        let mut last_error: Option<ServiceError> = None;
        let service_name = request.service_name.clone();
        
        for attempt in 0..self.config.retry.max_attempts {
            // Back off BEFORE taking a pool permit: previously the permit was
            // acquired first, so a sleeping retry held a connection slot for
            // the whole backoff delay and starved other callers.
            if attempt > 0 {
                let delay = self.calculate_backoff_delay(attempt - 1);
                sleep(delay).await;
            }
            
            // Connection-pool permit; released when `_permit` drops at the
            // end of this iteration.
            let _permit = self.connection_semaphore.acquire().await
                .map_err(|_| ServiceError::NetworkError("Failed to acquire connection permit".to_string()))?;
            
            let start_time = Instant::now();
            
            // Issue the call, bounded by the configured request timeout.
            let result = timeout(
                self.config.timeout.request_timeout,
                self.call_internal(&request)
            ).await;
            
            let response_time = start_time.elapsed();
            
            match result {
                Ok(Ok(mut response)) => {
                    // Response interceptor failure fails the whole call.
                    if let Err(e) = self.execute_response_interceptors(&request, &mut response, &context).await {
                        self.execute_error_interceptors(&request, &e, &context).await.ok();
                        return Err(e);
                    }
                    
                    self.update_connection_stats(&service_name, true, response_time).await;
                    
                    // Populate the cache (no-op unless caching is enabled).
                    self.cache_response(cache_key, &response).await;
                    
                    return Ok(response);
                }
                Ok(Err(e)) => {
                    self.update_connection_stats(&service_name, false, response_time).await;
                    
                    // Error-interceptor failures are deliberately ignored.
                    self.execute_error_interceptors(&request, &e, &context).await.ok();
                    
                    // Retry only if attempts remain and the policy matches.
                    if attempt < self.config.retry.max_attempts - 1 && self.should_retry(&e, None) {
                        last_error = Some(e);
                        continue;
                    } else {
                        return Err(e);
                    }
                }
                Err(_) => {
                    // The request exceeded the configured timeout.
                    let timeout_error = ServiceError::TimeoutError("Request timeout".to_string());
                    self.update_connection_stats(&service_name, false, response_time).await;
                    self.execute_error_interceptors(&request, &timeout_error, &context).await.ok();
                    
                    if attempt < self.config.retry.max_attempts - 1 && self.should_retry(&timeout_error, None) {
                        last_error = Some(timeout_error);
                        continue;
                    } else {
                        return Err(timeout_error);
                    }
                }
            }
        }
        
        // Reached only when max_attempts is 0 or every attempt chose to retry.
        Err(last_error.unwrap_or_else(|| ServiceError::NetworkError("All retry attempts failed".to_string())))
    }
    
    /// Executes a batch of requests concurrently; results preserve the
    /// input order.
    async fn batch_call(&self, requests: Vec<Request>) -> Vec<ServiceResult<Response>> {
        let futures = requests.into_iter().map(|request| {
            self.call_with_retry(request)
        });
        
        // Await all calls concurrently.
        futures::future::join_all(futures).await
    }
    
    /// Aggregates call statistics across all services.
    async fn get_call_stats(&self) -> CallStats {
        let stats_map = self.connection_stats.read().await;
        let total_requests: u64 = stats_map.values().map(|s| s.total_requests).sum();
        let successful_requests: u64 = stats_map.values().map(|s| s.successful_requests).sum();
        let failed_requests: u64 = stats_map.values().map(|s| s.failed_requests).sum();
        
        // Request-count-weighted average of the per-service response times.
        let avg_response_time = if !stats_map.is_empty() {
            let total_time: u128 = stats_map.values()
                .map(|s| s.average_response_time.as_millis() * s.total_requests as u128)
                .sum();
            Duration::from_millis((total_time / total_requests.max(1) as u128) as u64)
        } else {
            Duration::from_millis(0)
        };
        
        let last_call_time = stats_map.values()
            .filter_map(|s| s.last_request_time)
            .max();
        
        CallStats {
            total_calls: total_requests,
            successful_calls: successful_requests,
            failed_calls: failed_requests,
            average_response_time: avg_response_time,
            last_call_time,
        }
    }
    
    /// Lists the currently discoverable instances of a service.
    async fn get_available_instances(&self, service_name: &str) -> ServiceResult<Vec<ServiceInstance>> {
        self.registry.discover(service_name).await
    }
    
    /// Forces a fresh discovery pass for the service, discarding the result.
    async fn refresh_service_cache(&self, service_name: &str) -> ServiceResult<()> {
        self.registry.discover(service_name).await?;
        Ok(())
    }
}

impl AdvancedClientProxy {
    /// Internal call path: wraps discovery → load balancing → transport
    /// inside a circuit-breaker invocation for the target service.
    async fn call_internal(&self, request: &Request) -> ServiceResult<Response> {
        let service_name = request.service_name.clone();
        
        // The closure clones the component Arcs and the request so the boxed
        // future owns everything it needs ('static).
        self.circuit_breaker.call(
            &service_name,
            Box::new({
                let registry = self.registry.clone();
                let load_balancer = self.load_balancer.clone();
                let transport = self.transport.clone();
                let request_clone = request.clone();
                let service_name_clone = service_name.clone();
                
                move || {
                    Box::pin(async move {
                        // Service discovery; no instances is a hard failure.
                        let instances = registry.discover(&service_name_clone).await?;
                        if instances.is_empty() {
                            return Err(ServiceError::NoAvailableInstances(service_name_clone));
                        }
                        
                        // Pick one instance via the load balancer; `None`
                        // (nothing selectable) is also a hard failure.
                        let instance = load_balancer
                            .select(&service_name_clone, &instances)
                            .await?
                            .ok_or_else(|| ServiceError::NoAvailableInstances(service_name_clone))?;
                        
                        // Dispatch the request over the transport.
                        transport.send(request_clone, &instance).await
                    })
                }
            })
        ).await
    }
    
    /// Connection statistics for a single service, if any calls were recorded.
    pub async fn get_service_stats(&self, service_name: &str) -> Option<ConnectionStats> {
        let stats_map = self.connection_stats.read().await;
        stats_map.get(service_name).cloned()
    }
    
    /// Cache counters: (total entries, expired-but-not-yet-evicted entries).
    pub async fn get_cache_stats(&self) -> (usize, usize) {
        let cache = self.response_cache.read().await;
        let total_entries = cache.len();
        let expired_entries = cache.values().filter(|entry| entry.is_expired()).count();
        (total_entries, expired_entries)
    }
}