use std::sync::{Arc, Mutex, RwLock};
use std::time::{Duration, Instant};
use std::collections::{HashMap, VecDeque};
use super::data_pipeline::{TaskPriority, TaskStatus, ComputeTask, TaskResult};

/// Strategy used to choose which worker thread receives the next task.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum LoadBalanceStrategy {
    /// Round-robin style selection across workers.
    RoundRobin,
    /// Always pick the worker with the lowest composite load score.
    LeastLoaded,
    /// Score-weighted randomized selection driven by recent worker history.
    Adaptive,
}

/// Load metrics for a single worker thread.
#[derive(Debug, Clone)]
pub struct WorkerLoad {
    /// Index of the worker this record belongs to.
    pub worker_id: usize,
    /// Tasks currently assigned but not yet finished.
    pub active_tasks: usize,
    /// Total number of tasks this worker has completed.
    pub completed_tasks: usize,
    /// Exponentially-smoothed average execution time.
    pub avg_execution_time: Duration,
    /// Most recently reported CPU utilization (expected in [0, 1]).
    pub cpu_utilization: f64,
    /// Most recently reported memory usage (expected in [0, 1]).
    pub memory_usage: f64,
    /// Completion time of the most recent task, if any.
    pub last_task_completed: Option<Instant>,
}

impl WorkerLoad {
    /// Creates a zeroed load record for worker `worker_id`.
    pub fn new(worker_id: usize) -> Self {
        WorkerLoad {
            worker_id,
            active_tasks: 0,
            completed_tasks: 0,
            avg_execution_time: Duration::from_secs(0),
            cpu_utilization: 0.0,
            memory_usage: 0.0,
            last_task_completed: None,
        }
    }

    /// Records one completed task: bumps the completion counter, refreshes
    /// the completion timestamp, folds `execution_time` into the exponential
    /// moving average, and stores the latest CPU/memory readings.
    pub fn update_metrics(&mut self, execution_time: Duration, cpu_util: f64, mem_usage: f64) {
        self.completed_tasks += 1;
        self.last_task_completed = Some(Instant::now());

        if self.completed_tasks == 1 {
            // First sample seeds the average directly.
            self.avg_execution_time = execution_time;
        } else {
            // Exponential moving average with smoothing factor 0.2.
            let alpha = 0.2;
            let old_avg = self.avg_execution_time.as_secs_f64();
            let new_time = execution_time.as_secs_f64();
            self.avg_execution_time =
                Duration::from_secs_f64(old_avg * (1.0 - alpha) + new_time * alpha);
        }

        self.cpu_utilization = cpu_util;
        self.memory_usage = mem_usage;
    }

    /// Composite load score; higher means more loaded. Stays in [0, 1] as
    /// long as the CPU/memory readings are in [0, 1].
    ///
    /// Weighted blend of average execution time (40%), CPU utilization (40%)
    /// and memory usage (20%). The execution component is normalized against
    /// a 1-second budget and saturates at 1.0 — the original divided by 1.0
    /// (a no-op), so long-running workers pushed the score above 1 despite
    /// the stated "normalize to 0-1" intent.
    pub fn get_load_score(&self) -> f64 {
        let execution_weight = 0.4;
        let cpu_weight = 0.4;
        let memory_weight = 0.2;

        // Clamp so the documented [0, 1] range actually holds.
        let execution_score = self.avg_execution_time.as_secs_f64().min(1.0);
        let cpu_score = self.cpu_utilization;
        let memory_score = self.memory_usage;

        execution_score * execution_weight
            + cpu_score * cpu_weight
            + memory_score * memory_weight
    }
}

/// 自适应负载均衡器
pub struct AdaptiveLoadBalancer {
    worker_loads: Arc<RwLock<Vec<WorkerLoad>>>,
    strategy: LoadBalanceStrategy,
    last_adjustment: Instant,
    adjustment_interval: Duration,
}

impl AdaptiveLoadBalancer {
    pub fn new(num_workers: usize) -> Self {
        let mut worker_loads = Vec::with_capacity(num_workers);
        for i in 0..num_workers {
            worker_loads.push(WorkerLoad::new(i));
        }

        AdaptiveLoadBalancer {
            worker_loads: Arc::new(RwLock::new(worker_loads)),
            strategy: LoadBalanceStrategy::Adaptive,
            last_adjustment: Instant::now(),
            adjustment_interval: Duration::from_secs(5),
        }
    }

    pub fn select_worker(&self) -> usize {
        let loads = self.worker_loads.read().unwrap();
        
        match self.strategy {
            LoadBalanceStrategy::RoundRobin => {
                // 简单轮询
                loads.iter()
                    .min_by_key(|load| load.active_tasks)
                    .map(|load| load.worker_id)
                    .unwrap_or(0)
            },
            LoadBalanceStrategy::LeastLoaded => {
                // 选择负载最小的工作线程
                loads.iter()
                    .min_by(|a, b| {
                        a.get_load_score()
                            .partial_cmp(&b.get_load_score())
                            .unwrap()
                    })
                    .map(|load| load.worker_id)
                    .unwrap_or(0)
            },
            LoadBalanceStrategy::Adaptive => {
                // 自适应选择策略
                self.adaptive_select_worker(&loads)
            }
        }
    }

    fn adaptive_select_worker(&self, loads: &[WorkerLoad]) -> usize {
        // 计算每个工作线程的健康度分数
        let scores: Vec<(usize, f64)> = loads.iter()
            .map(|load| {
                let base_score = 1.0 - load.get_load_score();
                
                // 考虑最近完成任务的时间
                let recency_bonus = if let Some(last_completed) = load.last_task_completed {
                    let elapsed = last_completed.elapsed();
                    if elapsed < Duration::from_secs(1) {
                        0.2 // 最近完成任务的工作线程获得额外分数
                    } else {
                        0.0
                    }
                } else {
                    0.0
                };

                // 考虑任务完成历史
                let completion_bonus = (load.completed_tasks as f64).min(100.0) / 100.0 * 0.1;

                (load.worker_id, base_score + recency_bonus + completion_bonus)
            })
            .collect();

        // 使用软最大选择算法
        let temperature = 0.1; // 控制随机性
        let exp_scores: Vec<f64> = scores.iter()
            .map(|(_, score)| (score / temperature).exp())
            .collect();
        
        let sum: f64 = exp_scores.iter().sum();
        let normalized: Vec<f64> = exp_scores.iter()
            .map(|score| score / sum)
            .collect();

        // 随机选择，概率与分数成正比
        let mut rng = rand::thread_rng();
        let rand_val: f64 = rng.gen();
        
        let mut cumsum = 0.0;
        for (i, prob) in normalized.iter().enumerate() {
            cumsum += prob;
            if rand_val <= cumsum {
                return scores[i].0;
            }
        }

        scores[0].0 // 默认返回第一个工作线程
    }

    pub fn update_worker_load(
        &self,
        worker_id: usize,
        execution_time: Duration,
        cpu_util: f64,
        mem_usage: f64
    ) {
        let mut loads = self.worker_loads.write().unwrap();
        if let Some(load) = loads.get_mut(worker_id) {
            load.update_metrics(execution_time, cpu_util, mem_usage);
        }

        // 定期调整负载均衡策略
        if self.last_adjustment.elapsed() >= self.adjustment_interval {
            self.adjust_strategy(&loads);
        }
    }

    fn adjust_strategy(&self, loads: &[WorkerLoad]) {
        // 分析负载分布情况
        let load_scores: Vec<f64> = loads.iter()
            .map(|load| load.get_load_score())
            .collect();

        // 计算负载的标准差
        let mean = load_scores.iter().sum::<f64>() / load_scores.len() as f64;
        let variance = load_scores.iter()
            .map(|&score| (score - mean).powi(2))
            .sum::<f64>() / load_scores.len() as f64;
        let std_dev = variance.sqrt();

        // 根据负载分布调整策略
        let mut strategy = self.strategy;
        if std_dev > 0.3 { // 负载分布不均匀
            strategy = LoadBalanceStrategy::LeastLoaded;
        } else if std_dev < 0.1 { // 负载分布均匀
            strategy = LoadBalanceStrategy::RoundRobin;
        } else {
            strategy = LoadBalanceStrategy::Adaptive;
        }

        // 更新策略
        if strategy != self.strategy {
            println!("Adjusting load balance strategy to {:?}", strategy);
            self.strategy = strategy;
        }
    }
}

/// Dynamically promotes or demotes task priorities based on the observed
/// execution times and completion rates of each task.
pub struct DynamicPriorityManager {
    /// Per-task execution history, keyed by task id.
    task_history: Arc<Mutex<HashMap<u64, TaskHistory>>>,
    /// Per-priority thresholds used when deciding to re-prioritize.
    priority_thresholds: Arc<RwLock<PriorityThresholds>>,
}

/// Execution history tracked for a single task.
struct TaskHistory {
    /// Priority the task was registered with. Re-prioritization is always
    /// computed relative to this value, not to a previously adjusted one.
    original_priority: TaskPriority,
    /// Sliding window of the most recent execution times (capped at 10).
    execution_times: VecDeque<Duration>,
    /// Exponentially-weighted completion rate (1.0 = always completes).
    completion_rate: f64,
    /// When the task last finished executing.
    last_execution: Option<Instant>,
}

/// Per-priority thresholds that drive promotion/demotion decisions.
struct PriorityThresholds {
    /// Average execution time above which a task is promoted.
    execution_time: HashMap<TaskPriority, Duration>,
    /// Completion rate below which a task is promoted.
    completion_rate: HashMap<TaskPriority, f64>,
}

impl DynamicPriorityManager {
    /// Creates a manager seeded with the default per-priority thresholds.
    pub fn new() -> Self {
        // Execution-time budget per priority: running slower than this is
        // grounds for promotion.
        let execution_time: HashMap<TaskPriority, Duration> = vec![
            (TaskPriority::Low, Duration::from_millis(1000)),
            (TaskPriority::Normal, Duration::from_millis(500)),
            (TaskPriority::High, Duration::from_millis(200)),
            (TaskPriority::Critical, Duration::from_millis(100)),
        ]
        .into_iter()
        .collect();

        // Minimum acceptable completion rate per priority.
        let completion_rate: HashMap<TaskPriority, f64> = vec![
            (TaskPriority::Low, 0.8),
            (TaskPriority::Normal, 0.9),
            (TaskPriority::High, 0.95),
            (TaskPriority::Critical, 0.99),
        ]
        .into_iter()
        .collect();

        DynamicPriorityManager {
            task_history: Arc::new(Mutex::new(HashMap::new())),
            priority_thresholds: Arc::new(RwLock::new(PriorityThresholds {
                execution_time,
                completion_rate,
            })),
        }
    }

    /// Starts tracking `task_id` with a fresh history entry. A task that is
    /// already registered has its history replaced.
    pub fn register_task(&self, task_id: u64, priority: TaskPriority) {
        let fresh = TaskHistory {
            original_priority: priority,
            execution_times: VecDeque::with_capacity(10),
            completion_rate: 1.0,
            last_execution: None,
        };
        self.task_history.lock().unwrap().insert(task_id, fresh);
    }

    /// Records one execution of `task_id` and returns the new priority when
    /// the accumulated history warrants a change. Returns `None` when the
    /// priority should stay put or the task was never registered.
    pub fn update_task_metrics(
        &self,
        task_id: u64,
        execution_time: Duration,
        status: TaskStatus
    ) -> Option<TaskPriority> {
        let mut history = self.task_history.lock().unwrap();
        let entry = history.get_mut(&task_id)?;

        // Keep a sliding window of the 10 most recent execution times.
        if entry.execution_times.len() >= 10 {
            entry.execution_times.pop_front();
        }
        entry.execution_times.push_back(execution_time);

        // Exponentially-weighted completion rate (0.9 old / 0.1 new).
        let succeeded = status == TaskStatus::Completed;
        entry.completion_rate =
            entry.completion_rate * 0.9 + (if succeeded { 1.0 } else { 0.0 }) * 0.1;
        entry.last_execution = Some(Instant::now());

        self.calculate_new_priority(entry)
    }

    /// Compares the task's recent behavior against the thresholds of its
    /// original priority and returns the adjusted priority, or `None` when
    /// no change is needed.
    fn calculate_new_priority(&self, history: &TaskHistory) -> Option<TaskPriority> {
        let thresholds = self.priority_thresholds.read().unwrap();

        // Average execution time over the window (zero when empty).
        let samples = history.execution_times.len();
        let avg_execution_time = if samples == 0 {
            Duration::from_secs(0)
        } else {
            history.execution_times.iter().sum::<Duration>() / samples as u32
        };

        let current = history.original_priority;
        let time_limit = thresholds.execution_time[&current];
        let rate_floor = thresholds.completion_rate[&current];

        // Promote when the task is slow or failing; demote only when it is
        // comfortably fast AND completing well above the bar.
        let promote =
            avg_execution_time > time_limit || history.completion_rate < rate_floor;
        let demote = avg_execution_time < time_limit / 2
            && history.completion_rate > rate_floor * 1.1;

        let next = if promote {
            match current {
                TaskPriority::Low => TaskPriority::Normal,
                TaskPriority::Normal => TaskPriority::High,
                TaskPriority::High => TaskPriority::Critical,
                TaskPriority::Critical => TaskPriority::Critical,
            }
        } else if demote {
            match current {
                TaskPriority::Critical => TaskPriority::High,
                TaskPriority::High => TaskPriority::Normal,
                TaskPriority::Normal => TaskPriority::Low,
                TaskPriority::Low => TaskPriority::Low,
            }
        } else {
            current
        };

        if next != current {
            Some(next)
        } else {
            None
        }
    }

    /// Nudges the thresholds toward the globally observed behavior so the
    /// promotion/demotion rules track reality.
    pub fn adjust_thresholds(&self, global_stats: &GlobalExecutionStats) {
        let mut thresholds = self.priority_thresholds.write().unwrap();

        // Grow the time budget (×1.1) for priorities running slow, shrink it
        // (×0.9) otherwise.
        for (&priority, &avg_time) in &global_stats.avg_execution_time {
            if let Some(limit) = thresholds.execution_time.get_mut(&priority) {
                let factor = if avg_time > *limit { 1.1 } else { 0.9 };
                *limit = limit.mul_f64(factor);
            }
        }

        // Relax the completion-rate bar when it is being missed, tighten it
        // when it is being beaten; always kept inside [0.5, 0.99].
        for (&priority, &observed) in &global_stats.completion_rates {
            if let Some(floor) = thresholds.completion_rate.get_mut(&priority) {
                let factor = if observed < *floor { 0.95 } else { 1.05 };
                *floor = (*floor * factor).clamp(0.5, 0.99);
            }
        }
    }
}

/// Aggregated execution statistics across all tasks, grouped by priority.
pub struct GlobalExecutionStats {
    pub avg_execution_time: HashMap<TaskPriority, Duration>,
    pub completion_rates: HashMap<TaskPriority, f64>,
    pub total_tasks: HashMap<TaskPriority, usize>,
}

impl GlobalExecutionStats {
    /// Creates an empty statistics container.
    pub fn new() -> Self {
        GlobalExecutionStats {
            avg_execution_time: HashMap::new(),
            completion_rates: HashMap::new(),
            total_tasks: HashMap::new(),
        }
    }

    /// Folds one finished task into the running statistics for `priority`.
    pub fn update(&mut self, priority: TaskPriority, execution_time: Duration, success: bool) {
        // Task counter.
        *self.total_tasks.entry(priority).or_insert(0) += 1;

        // Smoothed average over roughly the last 10 samples. The first
        // sample seeds the entry, and (t*9 + t)/10 == t leaves it unchanged.
        let avg = self.avg_execution_time.entry(priority).or_insert(execution_time);
        *avg = (*avg * 9 + execution_time) / 10;

        // Exponentially-weighted completion rate (0.9 old / 0.1 new).
        let observed = if success { 1.0 } else { 0.0 };
        let rate = self.completion_rates.entry(priority).or_insert(1.0);
        *rate = *rate * 0.9 + observed * 0.1;
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use rand::Rng;

    /// 100 simulated completions should be spread over all 4 workers.
    #[test]
    fn test_adaptive_load_balancer() {
        let balancer = AdaptiveLoadBalancer::new(4);

        // Simulate task execution with randomized metrics.
        for _ in 0..100 {
            let worker_id = balancer.select_worker();
            let execution_time = Duration::from_millis(rand::thread_rng().gen_range(50..200));
            let cpu_util = rand::thread_rng().gen_range(0.0..1.0);
            let mem_usage = rand::thread_rng().gen_range(0.0..1.0);

            balancer.update_worker_load(worker_id, execution_time, cpu_util, mem_usage);
        }

        // Every worker should have received at least one task.
        let loads = balancer.worker_loads.read().unwrap();
        for load in loads.iter() {
            assert!(load.completed_tasks > 0);
            assert!(load.avg_execution_time > Duration::from_millis(0));
        }
    }

    #[test]
    fn test_dynamic_priority_manager() {
        let manager = DynamicPriorityManager::new();
        let task_id = 1;

        manager.register_task(task_id, TaskPriority::Normal);

        // 5 slow runs (600 ms) followed by 5 fast runs (100 ms).
        for i in 0..10 {
            let execution_time = if i < 5 {
                Duration::from_millis(600)
            } else {
                Duration::from_millis(100)
            };

            let new_priority = manager.update_task_metrics(
                task_id,
                execution_time,
                TaskStatus::Completed
            );

            if i == 4 {
                // Window average is 600 ms > the 500 ms threshold for
                // Normal, so the task is promoted.
                assert_eq!(new_priority, Some(TaskPriority::High));
            } else if i == 9 {
                // Window average is 350 ms: neither above the 500 ms
                // promotion threshold nor below the 250 ms demotion
                // threshold, so no change is reported. (The old expectation
                // of Some(TaskPriority::Normal) was unreachable: for a task
                // registered as Normal, `Some` is only returned for a
                // *different* priority.)
                assert_eq!(new_priority, None);
            }
        }
    }
}