use async_trait::async_trait;
use rustcloud_core::{
    LoadBalancer, ServiceInstance, ServiceResult, ServiceError, RequestContext, InstanceMetrics, StrategyInfo
};
use crate::{RoundRobin, Random, WeightedRoundRobin, ConsistentHash, SmoothWeightedRoundRobin, AdvancedConsistentHash, LeastConnections};
use std::collections::HashMap;
use std::sync::{Arc, RwLock, Mutex};
use std::time::{Duration, SystemTime, Instant};
use serde::{Deserialize, Serialize};
use tracing::{info, warn, error, debug};

/// Configuration for the intelligent load balancer.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IntelligentLoadBalancerConfig {
    /// Interval between performance-metric collection runs.
    pub metrics_collection_interval: Duration,
    /// How long to wait between strategy re-evaluations.
    pub evaluation_window: Duration,
    /// Minimum number of requests a strategy must have served before it
    /// is considered during strategy switching.
    pub min_requests_for_evaluation: u64,
    /// Weight of the normalized response-time score in the blended score.
    pub response_time_weight: f64,
    /// Weight of the success rate in the blended score.
    pub success_rate_weight: f64,
    /// Weight of the normalized load score in the blended score.
    pub load_weight: f64,
    /// Adaptive learning rate.
    /// NOTE(review): not referenced anywhere in this file — confirm it is
    /// used elsewhere or remove it.
    pub learning_rate: f64,
    /// Minimum score improvement required before switching strategies.
    pub strategy_switch_threshold: f64,
    /// Number of recent samples kept per instance for score prediction.
    pub prediction_window_size: usize,
}

impl Default for IntelligentLoadBalancerConfig {
    /// Sensible defaults: a 60 s evaluation window, score weights favoring
    /// latency and success rate equally (0.4 each) over load (0.2), and a
    /// 10-sample prediction window.
    fn default() -> Self {
        Self {
            metrics_collection_interval: Duration::from_secs(10),
            evaluation_window: Duration::from_secs(60),
            min_requests_for_evaluation: 10,
            prediction_window_size: 10,
            response_time_weight: 0.4,
            success_rate_weight: 0.4,
            load_weight: 0.2,
            learning_rate: 0.1,
            strategy_switch_threshold: 0.15,
        }
    }
}

/// Rolling performance history for a single service instance.
///
/// The four vectors are parallel: index `i` across all of them describes
/// one recorded sample. Their length is bounded by
/// `IntelligentLoadBalancerConfig::prediction_window_size`.
#[derive(Debug, Clone)]
struct InstancePerformanceHistory {
    /// Recent response-time samples (oldest first).
    response_times: Vec<Duration>,
    /// Success-rate samples, parallel to `response_times`.
    success_rates: Vec<f64>,
    /// Normalized load samples, parallel to `response_times`.
    loads: Vec<f64>,
    /// When each sample was recorded, parallel to `response_times`.
    timestamps: Vec<SystemTime>,
    /// Latest weighted score computed over the window (higher is better).
    current_score: f64,
    /// Linear extrapolation of the next score, clamped to [0, 1].
    predicted_score: f64,
}

impl InstancePerformanceHistory {
    fn new() -> Self {
        Self {
            response_times: Vec::new(),
            success_rates: Vec::new(),
            loads: Vec::new(),
            timestamps: Vec::new(),
            current_score: 1.0,
            predicted_score: 1.0,
        }
    }

    /// 添加性能数据点
    fn add_data_point(
        &mut self, 
        response_time: Duration, 
        success_rate: f64, 
        load: f64,
        config: &IntelligentLoadBalancerConfig,
    ) {
        let now = SystemTime::now();
        
        // 保持窗口大小
        if self.response_times.len() >= config.prediction_window_size {
            self.response_times.remove(0);
            self.success_rates.remove(0);
            self.loads.remove(0);
            self.timestamps.remove(0);
        }
        
        self.response_times.push(response_time);
        self.success_rates.push(success_rate);
        self.loads.push(load);
        self.timestamps.push(now);
        
        // 更新当前性能得分
        self.update_current_score(config);
        // 预测下一个得分
        self.update_predicted_score();
    }
    
    /// 更新当前性能得分
    fn update_current_score(&mut self, config: &IntelligentLoadBalancerConfig) {
        if self.response_times.is_empty() {
            return;
        }
        
        let avg_response_time = self.response_times.iter()
            .map(|d| d.as_millis() as f64)
            .sum::<f64>() / self.response_times.len() as f64;
            
        let avg_success_rate = self.success_rates.iter().sum::<f64>() / self.success_rates.len() as f64;
        let avg_load = self.loads.iter().sum::<f64>() / self.loads.len() as f64;
        
        // 计算归一化得分
        let response_time_score = 1.0 / (1.0 + avg_response_time / 1000.0); // 响应时间越低得分越高
        let success_rate_score = avg_success_rate; // 成功率直接作为得分
        let load_score = 1.0 / (1.0 + avg_load); // 负载越低得分越高
        
        // 加权计算总得分
        self.current_score = response_time_score * config.response_time_weight
            + success_rate_score * config.success_rate_weight
            + load_score * config.load_weight;
    }
    
    /// 使用简单的移动平均预测下一个性能得分
    fn update_predicted_score(&mut self) {
        if self.response_times.len() < 3 {
            self.predicted_score = self.current_score;
            return;
        }
        
        // 计算最近几次的得分趋势
        let recent_scores: Vec<f64> = self.response_times.iter()
            .zip(&self.success_rates)
            .zip(&self.loads)
            .map(|((rt, sr), load)| {
                let rt_score = 1.0 / (1.0 + rt.as_millis() as f64 / 1000.0);
                let load_score = 1.0 / (1.0 + load);
                (rt_score + sr + load_score) / 3.0
            })
            .collect();
        
        // 简单的线性预测
        if recent_scores.len() >= 2 {
            let last = recent_scores[recent_scores.len() - 1];
            let second_last = recent_scores[recent_scores.len() - 2];
            let trend = last - second_last;
            self.predicted_score = (last + trend).clamp(0.0, 1.0);
        } else {
            self.predicted_score = self.current_score;
        }
    }
}

/// Aggregated request statistics for one load-balancing strategy.
#[derive(Debug, Clone)]
struct StrategyPerformance {
    /// Strategy name (matches `LoadBalancer::name()`).
    name: String,
    /// Total requests routed while this strategy was active.
    total_requests: u64,
    /// Requests that completed successfully.
    successful_requests: u64,
    /// Exponential moving average of response time.
    avg_response_time: Duration,
    /// Recent performance scores (at most the last 10 are retained).
    performance_scores: Vec<f64>,
    /// Timestamp of the last statistics update.
    last_updated: SystemTime,
}

impl StrategyPerformance {
    /// Creates a fresh, empty record for the named strategy.
    fn new(name: String) -> Self {
        Self {
            name,
            total_requests: 0,
            successful_requests: 0,
            avg_response_time: Duration::from_millis(0),
            performance_scores: Vec::new(),
            last_updated: SystemTime::now(),
        }
    }

    /// Fraction of requests that succeeded. Returns 1.0 when nothing has
    /// been recorded yet so untested strategies are not penalized.
    fn success_rate(&self) -> f64 {
        if self.total_requests == 0 {
            1.0
        } else {
            self.successful_requests as f64 / self.total_requests as f64
        }
    }

    /// Records one request outcome, updates the response-time EMA, and
    /// appends the resulting performance score.
    fn update_performance(&mut self, response_time: Duration, success: bool) {
        self.total_requests += 1;
        if success {
            self.successful_requests += 1;
        }

        // Exponential moving average of the response time. Fix vs. original:
        // the EMA started at 0 ms, and since each new sample only carries
        // 10% weight, the average stayed biased low for many samples. Seed
        // the EMA with the first sample instead.
        if self.total_requests == 1 {
            self.avg_response_time = response_time;
        } else {
            let weight = 0.9;
            self.avg_response_time = Duration::from_millis(
                (self.avg_response_time.as_millis() as f64 * weight
                    + response_time.as_millis() as f64 * (1.0 - weight)) as u64,
            );
        }

        // Keep only the ten most recent scores.
        let score = self.calculate_performance_score();
        if self.performance_scores.len() >= 10 {
            self.performance_scores.remove(0);
        }
        self.performance_scores.push(score);

        self.last_updated = SystemTime::now();
    }

    /// Equal-weight blend of a normalized response-time score and the
    /// success rate, each in (0, 1].
    fn calculate_performance_score(&self) -> f64 {
        let response_time_score = 1.0 / (1.0 + self.avg_response_time.as_millis() as f64 / 1000.0);
        let success_rate_score = self.success_rate();

        (response_time_score + success_rate_score) / 2.0
    }

    /// Most recent score, or a neutral 0.5 when no data exists yet.
    fn get_current_performance_score(&self) -> f64 {
        self.performance_scores.last().copied().unwrap_or(0.5)
    }
}

/// Intelligent load balancer.
///
/// Capabilities (as implemented in this file):
/// 1. Tracks per-instance performance metrics in real time.
/// 2. Periodically evaluates registered strategies and switches to the
///    best-scoring one.
/// 3. Predicts instance performance by linear extrapolation of recent
///    per-instance scores.
/// 4. Falls back to a weight-adjusted, score-based instance pick when the
///    active strategy selects nothing.
pub struct IntelligentLoadBalancer {
    /// Balancer configuration.
    config: IntelligentLoadBalancerConfig,
    /// Candidate strategies; `current_strategy_index` points into this.
    strategies: Vec<Box<dyn LoadBalancer + Send + Sync>>,
    /// Index of the strategy currently in use.
    current_strategy_index: Arc<RwLock<usize>>,
    /// Per-strategy statistics, parallel to `strategies`.
    strategy_performances: Arc<RwLock<Vec<StrategyPerformance>>>,
    /// Rolling per-instance history keyed by instance address.
    instance_histories: Arc<RwLock<HashMap<String, InstancePerformanceHistory>>>,
    /// Latest raw metrics keyed by instance address.
    instance_metrics: Arc<RwLock<HashMap<String, InstanceMetrics>>>,
    /// Total number of selection requests handled.
    request_counter: Arc<Mutex<u64>>,
    /// When the active strategy was last (re)evaluated.
    last_evaluation: Arc<RwLock<SystemTime>>,
}

impl IntelligentLoadBalancer {
    /// Creates the balancer with every available strategy registered and
    /// round-robin (index 0) selected as the starting strategy.
    pub fn new(config: IntelligentLoadBalancerConfig) -> Self {
        let strategies: Vec<Box<dyn LoadBalancer + Send + Sync>> = vec![
            Box::new(RoundRobin::new()),
            Box::new(Random::new()),
            Box::new(WeightedRoundRobin::new()),
            Box::new(ConsistentHash::new()),
            Box::new(SmoothWeightedRoundRobin::new()),
            Box::new(AdvancedConsistentHash::new()),
            Box::new(LeastConnections::new()),
        ];

        // One statistics record per strategy, in the same order.
        let strategy_performances = strategies
            .iter()
            .map(|s| StrategyPerformance::new(s.name().to_string()))
            .collect();

        Self {
            config,
            strategies,
            current_strategy_index: Arc::new(RwLock::new(0)), // default: round-robin
            strategy_performances: Arc::new(RwLock::new(strategy_performances)),
            instance_histories: Arc::new(RwLock::new(HashMap::new())),
            instance_metrics: Arc::new(RwLock::new(HashMap::new())),
            request_counter: Arc::new(Mutex::new(0)),
            last_evaluation: Arc::new(RwLock::new(SystemTime::now())),
        }
    }

    /// Creates the balancer with `IntelligentLoadBalancerConfig::default()`.
    pub fn with_default_config() -> Self {
        Self::new(IntelligentLoadBalancerConfig::default())
    }

    /// Core selection routine: filters healthy instances, periodically
    /// re-evaluates which strategy performs best, delegates to the active
    /// strategy, and falls back to a history-based pick if the strategy
    /// returns nothing.
    async fn intelligent_select_instance(
        &self,
        service_name: &str,
        instances: &[ServiceInstance],
        context: Option<&RequestContext>,
    ) -> ServiceResult<Option<ServiceInstance>> {
        if instances.is_empty() {
            return Ok(None);
        }

        let healthy_instances: Vec<&ServiceInstance> = instances
            .iter()
            .filter(|instance| instance.healthy)
            .collect();

        if healthy_instances.is_empty() {
            return Ok(None);
        }

        // Count the request; `unwrap` only fails if a lock holder panicked.
        {
            let mut counter = self.request_counter.lock().unwrap();
            *counter += 1;
        }

        // Re-evaluate the active strategy once per evaluation window.
        if self.should_reevaluate_strategy().await {
            self.evaluate_and_switch_strategy(&healthy_instances).await?;
        }

        let current_strategy_index = *self.current_strategy_index.read().unwrap();
        let current_strategy = &self.strategies[current_strategy_index];

        // NOTE(review): the delegated strategy receives the *full* instance
        // list, not just the healthy subset — confirm strategies filter on
        // health themselves.
        let selected = match context {
            Some(ctx) => current_strategy.select_with_context(service_name, instances, ctx).await?,
            None => current_strategy.select(service_name, instances).await?,
        };

        // Fall back to the score-based picker when the strategy abstains.
        if selected.is_none() {
            return self.intelligent_fallback_select(&healthy_instances).await;
        }

        Ok(selected)
    }

    /// Returns true when the evaluation window has elapsed since the last
    /// strategy evaluation — or when the system clock went backwards, in
    /// which case re-evaluating is the safe choice.
    async fn should_reevaluate_strategy(&self) -> bool {
        let last_eval = *self.last_evaluation.read().unwrap();
        match SystemTime::now().duration_since(last_eval) {
            Ok(elapsed) => elapsed >= self.config.evaluation_window,
            Err(_) => true,
        }
    }

    /// Scores every strategy with enough traffic and switches to the best
    /// one if it beats the current strategy by more than the configured
    /// threshold.
    ///
    /// `_instances` is currently unused; the underscore suppresses the
    /// unused-parameter warning the original produced while keeping the
    /// signature stable for callers.
    async fn evaluate_and_switch_strategy(&self, _instances: &[&ServiceInstance]) -> ServiceResult<()> {
        debug!("Evaluating and potentially switching load balancing strategy");

        let current_index = *self.current_strategy_index.read().unwrap();

        // Take the read lock once for both the best-score scan and the
        // current strategy's score (the original acquired it twice, and
        // mixed silent-skip with unwrap on the same lock).
        let (best_strategy_index, best_score, current_score) = {
            let performances = self.strategy_performances.read().unwrap();

            let mut best_index = 0;
            let mut best = 0.0;
            for (index, perf) in performances.iter().enumerate() {
                // Only strategies with enough traffic are comparable.
                if perf.total_requests >= self.config.min_requests_for_evaluation {
                    let score = perf.get_current_performance_score();
                    debug!("Strategy {} score: {:.3}", perf.name, score);

                    if score > best {
                        best = score;
                        best_index = index;
                    }
                }
            }

            (
                best_index,
                best,
                performances[current_index].get_current_performance_score(),
            )
        };

        // Switch only when the candidate is clearly better, to avoid
        // thrashing between strategies with similar scores.
        if current_index != best_strategy_index
            && best_score > current_score + self.config.strategy_switch_threshold
        {
            info!(
                "Switching load balancing strategy from {} to {} (score improvement: {:.3} -> {:.3})",
                self.strategies[current_index].name(),
                self.strategies[best_strategy_index].name(),
                current_score,
                best_score
            );

            *self.current_strategy_index.write().unwrap() = best_strategy_index;
        }

        // Record when this evaluation pass happened.
        *self.last_evaluation.write().unwrap() = SystemTime::now();

        Ok(())
    }

    /// History-based fallback: picks the instance with the highest
    /// weight-adjusted predicted score; instances without history get a
    /// neutral 0.5.
    async fn intelligent_fallback_select(&self, instances: &[&ServiceInstance]) -> ServiceResult<Option<ServiceInstance>> {
        let histories = self.instance_histories.read().unwrap();

        let mut best_instance = instances[0];
        let mut best_score = 0.0;

        for &instance in instances {
            let score = histories
                .get(&instance.address())
                .map(|history| history.predicted_score)
                .unwrap_or(0.5);

            // Bias the score by the instance's configured weight.
            let weighted_score = score * instance.weight;

            if weighted_score > best_score {
                best_score = weighted_score;
                best_instance = instance;
            }
        }

        debug!("Intelligent fallback selected instance {} with score {:.3}", 
               best_instance.address(), best_score);

        Ok(Some(best_instance.clone()))
    }

    /// Records the outcome of a request against the strategy that served it.
    async fn update_strategy_performance(&self, response_time: Duration, success: bool) {
        let current_index = *self.current_strategy_index.read().unwrap();

        // Best-effort: skip the update rather than panic on a poisoned lock.
        if let Ok(mut performances) = self.strategy_performances.write() {
            if let Some(perf) = performances.get_mut(current_index) {
                perf.update_performance(response_time, success);
            }
        }
    }

    /// Returns `(current_score, total_requests, success_rate)` keyed by
    /// strategy name.
    pub async fn get_strategy_performance_report(&self) -> HashMap<String, (f64, u64, f64)> {
        let mut report = HashMap::new();

        if let Ok(performances) = self.strategy_performances.read() {
            for perf in performances.iter() {
                report.insert(
                    perf.name.clone(),
                    (
                        perf.get_current_performance_score(),
                        perf.total_requests,
                        perf.success_rate(),
                    ),
                );
            }
        }

        report
    }

    /// Describes the strategy currently in use.
    pub async fn get_current_strategy_info(&self) -> StrategyInfo {
        let current_index = *self.current_strategy_index.read().unwrap();
        self.strategies[current_index].get_strategy_info()
    }

    /// Manually switches to the named strategy, bypassing evaluation.
    ///
    /// # Errors
    /// Returns `ServiceError::LoadBalancerError` when no registered strategy
    /// matches `strategy_name`.
    pub async fn force_switch_strategy(&self, strategy_name: &str) -> ServiceResult<()> {
        for (index, strategy) in self.strategies.iter().enumerate() {
            if strategy.name() == strategy_name {
                *self.current_strategy_index.write().unwrap() = index;
                info!("Manually switched to strategy: {}", strategy_name);
                return Ok(());
            }
        }

        Err(ServiceError::LoadBalancerError(
            format!("Strategy '{}' not found", strategy_name)
        ))
    }
}

#[async_trait]
impl LoadBalancer for IntelligentLoadBalancer {
    /// Context-free selection; times the call and feeds the outcome into
    /// the active strategy's statistics.
    async fn select(
        &self,
        service_name: &str,
        instances: &[ServiceInstance],
    ) -> ServiceResult<Option<ServiceInstance>> {
        let start_time = Instant::now();

        let result = self.intelligent_select_instance(service_name, instances, None).await;

        let elapsed = start_time.elapsed();
        // A call counts as successful only when it produced an instance.
        // (`matches!` replaces the original `is_ok() && ...unwrap().is_some()`.)
        let success = matches!(result, Ok(Some(_)));

        self.update_strategy_performance(elapsed, success).await;

        result
    }

    /// Context-aware selection; same bookkeeping as `select`.
    async fn select_with_context(
        &self,
        service_name: &str,
        instances: &[ServiceInstance],
        context: &RequestContext,
    ) -> ServiceResult<Option<ServiceInstance>> {
        let start_time = Instant::now();

        let result = self.intelligent_select_instance(service_name, instances, Some(context)).await;

        let elapsed = start_time.elapsed();
        let success = matches!(result, Ok(Some(_)));

        self.update_strategy_performance(elapsed, success).await;

        result
    }

    /// Stores the latest raw metrics for the instance and folds them into
    /// its rolling performance history.
    async fn update_instance_metrics(&self, instance: &ServiceInstance, metrics: InstanceMetrics) {
        // Store the latest raw metrics, releasing the lock immediately.
        {
            let mut instance_metrics = self.instance_metrics.write().unwrap();
            instance_metrics.insert(instance.address(), metrics.clone());
        }

        // Fold the sample into the instance's rolling history.
        let mut histories = self.instance_histories.write().unwrap();
        let history = histories.entry(instance.address())
            .or_insert_with(InstancePerformanceHistory::new);

        // Normalize active connections into [0, 1], treating 1000
        // connections as saturation.
        let load = (metrics.active_connections as f64 / 1000.0).min(1.0);

        history.add_data_point(
            metrics.response_time,
            metrics.success_rate,
            load,
            &self.config,
        );

        debug!("Updated metrics for instance {}: score={:.3}, predicted={:.3}", 
               instance.address(), history.current_score, history.predicted_score);
    }

    fn get_strategy_info(&self) -> StrategyInfo {
        StrategyInfo {
            name: "Intelligent Load Balancer".to_string(),
            description: "智能负载均衡器，自动选择最佳策略和实例".to_string(),
            requires_session_affinity: false,
            supports_weighted_routing: true,
        }
    }

    fn name(&self) -> &str {
        "intelligent"
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rustcloud_core::ServiceInstance;
    use std::time::Duration;

    /// Smoke test: with default config and three healthy backends, a plain
    /// `select` succeeds and yields some instance.
    #[tokio::test]
    async fn test_intelligent_load_balancer_basic() {
        let balancer = IntelligentLoadBalancer::new(IntelligentLoadBalancerConfig::default());

        let backends: Vec<_> = [8081, 8082, 8083]
            .iter()
            .map(|&port| ServiceInstance::new("service1", "127.0.0.1", port, "http"))
            .collect();

        let picked = balancer.select("test-service", &backends).await;
        assert!(picked.is_ok());
        assert!(picked.unwrap().is_some());
    }

    /// Feeds one fast/healthy and one slow/flaky backend, then checks the
    /// selection distribution over repeated picks.
    #[tokio::test]
    async fn test_intelligent_load_balancer_with_metrics() {
        let config = IntelligentLoadBalancerConfig {
            min_requests_for_evaluation: 1,
            evaluation_window: Duration::from_millis(100),
            ..Default::default()
        };
        let balancer = IntelligentLoadBalancer::new(config);

        let backends = vec![
            ServiceInstance::new("service1", "127.0.0.1", 8081, "http"),
            ServiceInstance::new("service1", "127.0.0.1", 8082, "http"),
        ];

        // Simulated metrics: a fast, reliable backend...
        let fast = InstanceMetrics {
            response_time: Duration::from_millis(50),
            success_rate: 0.99,
            active_connections: 10,
            cpu_usage: 20.0,
            memory_usage: 30.0,
            last_updated: SystemTime::now(),
        };

        // ...and a slow, overloaded one.
        let slow = InstanceMetrics {
            response_time: Duration::from_millis(500),
            success_rate: 0.80,
            active_connections: 100,
            cpu_usage: 80.0,
            memory_usage: 90.0,
            last_updated: SystemTime::now(),
        };

        balancer.update_instance_metrics(&backends[0], fast).await;
        balancer.update_instance_metrics(&backends[1], slow).await;

        // Run repeated selections and tally picks per port.
        let mut tally: HashMap<_, i32> = HashMap::new();
        for _ in 0..20 {
            if let Ok(Some(chosen)) = balancer.select("test-service", &backends).await {
                *tally.entry(chosen.port).or_insert(0) += 1;
            }

            // Short pause so the evaluation window can elapse.
            tokio::time::sleep(Duration::from_millis(10)).await;
        }

        println!("Selection distribution: {:?}", tally);
        // The better-performing backend should win more often. Because the
        // balancer chooses strategies adaptively, the split is not fully
        // deterministic, so no hard assertion is made here.
    }

    /// Drives enough traffic through a low-threshold config to trigger at
    /// least one strategy evaluation, then checks the performance report.
    #[tokio::test]
    async fn test_strategy_switching() {
        let config = IntelligentLoadBalancerConfig {
            min_requests_for_evaluation: 1,
            evaluation_window: Duration::from_millis(50),
            // A tiny threshold makes switching easy to trigger.
            strategy_switch_threshold: 0.01,
            ..Default::default()
        };
        let balancer = IntelligentLoadBalancer::new(config);

        let backends = vec![ServiceInstance::new("service1", "127.0.0.1", 8081, "http")];

        let initial_strategy = balancer.get_current_strategy_info().await;
        println!("Initial strategy: {}", initial_strategy.name);

        // Issue requests to trigger at least one evaluation pass.
        for _ in 0..10 {
            let _ = balancer.select("test-service", &backends).await;
            tokio::time::sleep(Duration::from_millis(10)).await;
        }

        let report = balancer.get_strategy_performance_report().await;
        println!("Performance report: {:?}", report);

        // The report must contain per-strategy data.
        assert!(!report.is_empty());
    }
}